2 * Copyright (c) 2004-2019 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
31 #include <sys/param.h>
33 #include <sys/mcache.h>
34 #include <sys/socket.h>
35 #include <kern/debug.h>
36 #include <libkern/OSAtomic.h>
37 #include <kern/kalloc.h>
40 #include <netinet/in.h>
41 #include <netinet/ip_var.h>
43 #include "net/net_str_id.h"
45 /* mbuf flags visible to KPI clients; do not add private flags here */
46 static const mbuf_flags_t mbuf_flags_mask
= (MBUF_EXT
| MBUF_PKTHDR
| MBUF_EOR
|
47 MBUF_LOOP
| MBUF_BCAST
| MBUF_MCAST
| MBUF_FRAG
| MBUF_FIRSTFRAG
|
48 MBUF_LASTFRAG
| MBUF_PROMISC
| MBUF_HASFCS
);
50 /* Unalterable mbuf flags */
51 static const mbuf_flags_t mbuf_cflags_mask
= (MBUF_EXT
);
53 #define MAX_MBUF_TX_COMPL_FUNC 32
55 mbuf_tx_compl_table
[MAX_MBUF_TX_COMPL_FUNC
];
56 extern lck_rw_t
*mbuf_tx_compl_tbl_lock
;
57 u_int32_t mbuf_tx_compl_index
= 0;
59 #if (DEVELOPMENT || DEBUG)
60 int mbuf_tx_compl_debug
= 0;
61 SInt64 mbuf_tx_compl_outstanding
__attribute__((aligned(8))) = 0;
62 u_int64_t mbuf_tx_compl_aborted
__attribute__((aligned(8))) = 0;
64 SYSCTL_DECL(_kern_ipc
);
65 SYSCTL_NODE(_kern_ipc
, OID_AUTO
, mbtxcf
,
66 CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "");
67 SYSCTL_INT(_kern_ipc_mbtxcf
, OID_AUTO
, debug
,
68 CTLFLAG_RW
| CTLFLAG_LOCKED
, &mbuf_tx_compl_debug
, 0, "");
69 SYSCTL_INT(_kern_ipc_mbtxcf
, OID_AUTO
, index
,
70 CTLFLAG_RD
| CTLFLAG_LOCKED
, &mbuf_tx_compl_index
, 0, "");
71 SYSCTL_QUAD(_kern_ipc_mbtxcf
, OID_AUTO
, oustanding
,
72 CTLFLAG_RD
| CTLFLAG_LOCKED
, &mbuf_tx_compl_outstanding
, "");
73 SYSCTL_QUAD(_kern_ipc_mbtxcf
, OID_AUTO
, aborted
,
74 CTLFLAG_RD
| CTLFLAG_LOCKED
, &mbuf_tx_compl_aborted
, "");
75 #endif /* (DEBUG || DEVELOPMENT) */
78 mbuf_data(mbuf_t mbuf
)
84 mbuf_datastart(mbuf_t mbuf
)
86 if (mbuf
->m_flags
& M_EXT
) {
87 return mbuf
->m_ext
.ext_buf
;
89 if (mbuf
->m_flags
& M_PKTHDR
) {
90 return mbuf
->m_pktdat
;
96 mbuf_setdata(mbuf_t mbuf
, void *data
, size_t len
)
98 size_t start
= (size_t)((char *)mbuf_datastart(mbuf
));
99 size_t maxlen
= mbuf_maxlen(mbuf
);
101 if ((size_t)data
< start
|| ((size_t)data
) + len
> start
+ maxlen
) {
111 mbuf_align_32(mbuf_t mbuf
, size_t len
)
113 if ((mbuf
->m_flags
& M_EXT
) != 0 && m_mclhasreference(mbuf
)) {
116 mbuf
->m_data
= mbuf_datastart(mbuf
);
118 ((mbuf_trailingspace(mbuf
) - len
) & ~(sizeof(u_int32_t
) - 1));
124 * This function is used to provide mcl_to_paddr via symbol indirection,
125 * please avoid any change in behavior or remove the indirection in
126 * config/Unsupported*
129 mbuf_data_to_physical(void *ptr
)
131 return (addr64_t
)mcl_to_paddr(ptr
);
135 mbuf_get(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
137 /* Must set *mbuf to NULL in failure case */
138 *mbuf
= m_get(how
, type
);
140 return *mbuf
== NULL
? ENOMEM
: 0;
144 mbuf_gethdr(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
146 /* Must set *mbuf to NULL in failure case */
147 *mbuf
= m_gethdr(how
, type
);
149 return *mbuf
== NULL
? ENOMEM
: 0;
153 mbuf_attachcluster(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
,
154 caddr_t extbuf
, void (*extfree
)(caddr_t
, u_int
, caddr_t
),
155 size_t extsize
, caddr_t extarg
)
157 if (mbuf
== NULL
|| extbuf
== NULL
|| extfree
== NULL
|| extsize
== 0) {
161 if ((*mbuf
= m_clattach(*mbuf
, type
, extbuf
,
162 extfree
, extsize
, extarg
, how
, 0)) == NULL
) {
170 mbuf_ring_cluster_alloc(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
,
171 void (*extfree
)(caddr_t
, u_int
, caddr_t
), size_t *size
)
173 caddr_t extbuf
= NULL
;
176 if (mbuf
== NULL
|| extfree
== NULL
|| size
== NULL
|| *size
== 0) {
180 if ((err
= mbuf_alloccluster(how
, size
, &extbuf
)) != 0) {
184 if ((*mbuf
= m_clattach(*mbuf
, type
, extbuf
,
185 extfree
, *size
, NULL
, how
, 1)) == NULL
) {
186 mbuf_freecluster(extbuf
, *size
);
194 mbuf_ring_cluster_is_active(mbuf_t mbuf
)
196 return m_ext_paired_is_active(mbuf
);
200 mbuf_ring_cluster_activate(mbuf_t mbuf
)
202 if (mbuf_ring_cluster_is_active(mbuf
)) {
206 m_ext_paired_activate(mbuf
);
211 mbuf_cluster_set_prop(mbuf_t mbuf
, u_int32_t oldprop
, u_int32_t newprop
)
213 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_EXT
)) {
217 return m_ext_set_prop(mbuf
, oldprop
, newprop
) ? 0 : EBUSY
;
221 mbuf_cluster_get_prop(mbuf_t mbuf
, u_int32_t
*prop
)
223 if (mbuf
== NULL
|| prop
== NULL
|| !(mbuf
->m_flags
& M_EXT
)) {
227 *prop
= m_ext_get_prop(mbuf
);
232 mbuf_alloccluster(mbuf_how_t how
, size_t *size
, caddr_t
*addr
)
234 if (size
== NULL
|| *size
== 0 || addr
== NULL
) {
240 /* Jumbo cluster pool not available? */
241 if (*size
> MBIGCLBYTES
&& njcl
== 0) {
245 if (*size
<= MCLBYTES
&& (*addr
= m_mclalloc(how
)) != NULL
) {
247 } else if (*size
> MCLBYTES
&& *size
<= MBIGCLBYTES
&&
248 (*addr
= m_bigalloc(how
)) != NULL
) {
250 } else if (*size
> MBIGCLBYTES
&& *size
<= M16KCLBYTES
&&
251 (*addr
= m_16kalloc(how
)) != NULL
) {
265 mbuf_freecluster(caddr_t addr
, size_t size
)
267 if (size
!= MCLBYTES
&& size
!= MBIGCLBYTES
&& size
!= M16KCLBYTES
) {
268 panic("%s: invalid size (%ld) for cluster %p", __func__
,
272 if (size
== MCLBYTES
) {
274 } else if (size
== MBIGCLBYTES
) {
275 m_bigfree(addr
, MBIGCLBYTES
, NULL
);
276 } else if (njcl
> 0) {
277 m_16kfree(addr
, M16KCLBYTES
, NULL
);
279 panic("%s: freeing jumbo cluster to an empty pool", __func__
);
284 mbuf_getcluster(mbuf_how_t how
, mbuf_type_t type
, size_t size
, mbuf_t
*mbuf
)
286 /* Must set *mbuf to NULL in failure case */
294 *mbuf
= m_get(how
, type
);
301 * At the time this code was written, m_{mclget,mbigget,m16kget}
302 * would always return the same value that was passed in to it.
304 if (size
== MCLBYTES
) {
305 *mbuf
= m_mclget(*mbuf
, how
);
306 } else if (size
== MBIGCLBYTES
) {
307 *mbuf
= m_mbigget(*mbuf
, how
);
308 } else if (size
== M16KCLBYTES
) {
310 *mbuf
= m_m16kget(*mbuf
, how
);
312 /* Jumbo cluster pool not available? */
320 if (*mbuf
== NULL
|| ((*mbuf
)->m_flags
& M_EXT
) == 0) {
324 if (created
&& error
!= 0) {
332 mbuf_mclget(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
334 /* Must set *mbuf to NULL in failure case */
341 error
= mbuf_get(how
, type
, mbuf
);
349 * At the time this code was written, m_mclget would always
350 * return the same value that was passed in to it.
352 *mbuf
= m_mclget(*mbuf
, how
);
354 if (created
&& ((*mbuf
)->m_flags
& M_EXT
) == 0) {
358 if (*mbuf
== NULL
|| ((*mbuf
)->m_flags
& M_EXT
) == 0) {
366 mbuf_getpacket(mbuf_how_t how
, mbuf_t
*mbuf
)
368 /* Must set *mbuf to NULL in failure case */
371 *mbuf
= m_getpacket_how(how
);
374 if (how
== MBUF_WAITOK
) {
385 * This function is used to provide m_free via symbol indirection, please avoid
386 * any change in behavior or remove the indirection in config/Unsupported*
389 mbuf_free(mbuf_t mbuf
)
395 * This function is used to provide m_freem via symbol indirection, please avoid
396 * any change in behavior or remove the indirection in config/Unsupported*
399 mbuf_freem(mbuf_t mbuf
)
405 mbuf_freem_list(mbuf_t mbuf
)
407 return m_freem_list(mbuf
);
411 mbuf_leadingspace(const mbuf_t mbuf
)
413 return M_LEADINGSPACE(mbuf
);
417 * This function is used to provide m_trailingspace via symbol indirection,
418 * please avoid any change in behavior or remove the indirection in
419 * config/Unsupported*
422 mbuf_trailingspace(const mbuf_t mbuf
)
424 return M_TRAILINGSPACE(mbuf
);
429 mbuf_copym(const mbuf_t src
, size_t offset
, size_t len
,
430 mbuf_how_t how
, mbuf_t
*new_mbuf
)
432 /* Must set *mbuf to NULL in failure case */
433 *new_mbuf
= m_copym(src
, offset
, len
, how
);
435 return *new_mbuf
== NULL
? ENOMEM
: 0;
439 mbuf_dup(const mbuf_t src
, mbuf_how_t how
, mbuf_t
*new_mbuf
)
441 /* Must set *new_mbuf to NULL in failure case */
442 *new_mbuf
= m_dup(src
, how
);
444 return *new_mbuf
== NULL
? ENOMEM
: 0;
448 mbuf_prepend(mbuf_t
*orig
, size_t len
, mbuf_how_t how
)
450 /* Must set *orig to NULL in failure case */
451 *orig
= m_prepend_2(*orig
, len
, how
, 0);
453 return *orig
== NULL
? ENOMEM
: 0;
457 mbuf_split(mbuf_t src
, size_t offset
,
458 mbuf_how_t how
, mbuf_t
*new_mbuf
)
460 /* Must set *new_mbuf to NULL in failure case */
461 *new_mbuf
= m_split(src
, offset
, how
);
463 return *new_mbuf
== NULL
? ENOMEM
: 0;
467 mbuf_pullup(mbuf_t
*mbuf
, size_t len
)
469 /* Must set *mbuf to NULL in failure case */
470 *mbuf
= m_pullup(*mbuf
, len
);
472 return *mbuf
== NULL
? ENOMEM
: 0;
476 mbuf_pulldown(mbuf_t src
, size_t *offset
, size_t len
, mbuf_t
*location
)
478 /* Must set *location to NULL in failure case */
480 *location
= m_pulldown(src
, *offset
, len
, &new_offset
);
481 *offset
= new_offset
;
483 return *location
== NULL
? ENOMEM
: 0;
487 * This function is used to provide m_adj via symbol indirection, please avoid
488 * any change in behavior or remove the indirection in config/Unsupported*
491 mbuf_adj(mbuf_t mbuf
, int len
)
497 mbuf_adjustlen(mbuf_t m
, int amount
)
499 /* Verify m_len will be valid after adding amount */
501 int used
= (size_t)mbuf_data(m
) - (size_t)mbuf_datastart(m
) +
504 if ((size_t)(amount
+ used
) > mbuf_maxlen(m
)) {
507 } else if (-amount
> m
->m_len
) {
516 mbuf_concatenate(mbuf_t dst
, mbuf_t src
)
524 /* return dst as is in the current implementation */
528 mbuf_copydata(const mbuf_t m0
, size_t off
, size_t len
, void *out_data
)
530 /* Copied m_copydata, added error handling (don't just panic) */
538 if (off
< (size_t)m
->m_len
) {
548 count
= m
->m_len
- off
> len
? len
: m
->m_len
- off
;
549 bcopy(mtod(m
, caddr_t
) + off
, out_data
, count
);
551 out_data
= ((char *)out_data
) + count
;
560 mbuf_mclhasreference(mbuf_t mbuf
)
562 if ((mbuf
->m_flags
& M_EXT
)) {
563 return m_mclhasreference(mbuf
);
572 mbuf_next(const mbuf_t mbuf
)
578 mbuf_setnext(mbuf_t mbuf
, mbuf_t next
)
580 if (next
&& ((next
)->m_nextpkt
!= NULL
||
581 (next
)->m_type
== MT_FREE
)) {
590 mbuf_nextpkt(const mbuf_t mbuf
)
592 return mbuf
->m_nextpkt
;
596 mbuf_setnextpkt(mbuf_t mbuf
, mbuf_t nextpkt
)
598 mbuf
->m_nextpkt
= nextpkt
;
602 mbuf_len(const mbuf_t mbuf
)
608 mbuf_setlen(mbuf_t mbuf
, size_t len
)
614 mbuf_maxlen(const mbuf_t mbuf
)
616 if (mbuf
->m_flags
& M_EXT
) {
617 return mbuf
->m_ext
.ext_size
;
619 return &mbuf
->m_dat
[MLEN
] - ((char *)mbuf_datastart(mbuf
));
623 mbuf_type(const mbuf_t mbuf
)
629 mbuf_settype(mbuf_t mbuf
, mbuf_type_t new_type
)
631 if (new_type
== MBUF_TYPE_FREE
) {
635 m_mchtype(mbuf
, new_type
);
641 mbuf_flags(const mbuf_t mbuf
)
643 return mbuf
->m_flags
& mbuf_flags_mask
;
647 mbuf_setflags(mbuf_t mbuf
, mbuf_flags_t flags
)
650 mbuf_flags_t oflags
= mbuf
->m_flags
;
653 * 1. Return error if public but un-alterable flags are changed
655 * 2. Return error if bits other than public flags are set in passed
657 * Please note that private flag bits must be passed as reset by
658 * kexts, as they must use mbuf_flags KPI to get current set of
659 * mbuf flags and mbuf_flags KPI does not expose private flags.
661 if ((flags
^ oflags
) & mbuf_cflags_mask
) {
663 } else if (flags
& ~mbuf_flags_mask
) {
666 mbuf
->m_flags
= flags
| (mbuf
->m_flags
& ~mbuf_flags_mask
);
668 * If M_PKTHDR bit has changed, we have work to do;
669 * m_reinit() will take care of setting/clearing the
670 * bit, as well as the rest of bookkeeping.
672 if ((oflags
^ mbuf
->m_flags
) & M_PKTHDR
) {
673 mbuf
->m_flags
^= M_PKTHDR
; /* restore */
675 (mbuf
->m_flags
& M_PKTHDR
) ? 0 : 1);
683 mbuf_setflags_mask(mbuf_t mbuf
, mbuf_flags_t flags
, mbuf_flags_t mask
)
687 if (mask
& (~mbuf_flags_mask
| mbuf_cflags_mask
)) {
690 mbuf_flags_t oflags
= mbuf
->m_flags
;
691 mbuf
->m_flags
= (flags
& mask
) | (mbuf
->m_flags
& ~mask
);
693 * If M_PKTHDR bit has changed, we have work to do;
694 * m_reinit() will take care of setting/clearing the
695 * bit, as well as the rest of bookkeeping.
697 if ((oflags
^ mbuf
->m_flags
) & M_PKTHDR
) {
698 mbuf
->m_flags
^= M_PKTHDR
; /* restore */
700 (mbuf
->m_flags
& M_PKTHDR
) ? 0 : 1);
708 mbuf_copy_pkthdr(mbuf_t dest
, const mbuf_t src
)
710 if (((src
)->m_flags
& M_PKTHDR
) == 0) {
714 m_copy_pkthdr(dest
, src
);
720 mbuf_pkthdr_len(const mbuf_t mbuf
)
722 if (((mbuf
)->m_flags
& M_PKTHDR
) == 0) {
726 * While we Assert for development or debug builds,
727 * also make sure we never return negative length
730 ASSERT(mbuf
->m_pkthdr
.len
>= 0);
731 if (mbuf
->m_pkthdr
.len
< 0) {
734 return mbuf
->m_pkthdr
.len
;
737 __private_extern__
size_t
738 mbuf_pkthdr_maxlen(mbuf_t m
)
744 maxlen
+= mbuf_maxlen(n
);
751 mbuf_pkthdr_setlen(mbuf_t mbuf
, size_t len
)
753 if (len
> INT32_MAX
) {
757 mbuf
->m_pkthdr
.len
= len
;
761 mbuf_pkthdr_adjustlen(mbuf_t mbuf
, int amount
)
763 mbuf
->m_pkthdr
.len
+= amount
;
767 mbuf_pkthdr_rcvif(const mbuf_t mbuf
)
770 * If we reference count ifnets, we should take a reference here
773 return mbuf
->m_pkthdr
.rcvif
;
777 mbuf_pkthdr_setrcvif(mbuf_t mbuf
, ifnet_t ifnet
)
779 /* May want to walk ifnet list to determine if interface is valid */
780 mbuf
->m_pkthdr
.rcvif
= (struct ifnet
*)ifnet
;
785 mbuf_pkthdr_header(const mbuf_t mbuf
)
787 return mbuf
->m_pkthdr
.pkt_hdr
;
791 mbuf_pkthdr_setheader(mbuf_t mbuf
, void *header
)
793 mbuf
->m_pkthdr
.pkt_hdr
= (void*)header
;
797 mbuf_inbound_modified(mbuf_t mbuf
)
799 /* Invalidate hardware generated checksum flags */
800 mbuf
->m_pkthdr
.csum_flags
= 0;
804 mbuf_outbound_finalize(struct mbuf
*m
, u_int32_t pf
, size_t o
)
806 /* Generate the packet in software, client needs it */
809 (void) in_finalize_cksum(m
, o
, m
->m_pkthdr
.csum_flags
);
815 * Checksum offload should not have been enabled when
816 * extension headers exist; indicate that the callee
817 * should skip such case by setting optlen to -1.
819 (void) in6_finalize_cksum(m
, o
, -1, -1, m
->m_pkthdr
.csum_flags
);
833 mbuf
->m_pkthdr
.csum_flags
|= CSUM_VLAN_TAG_VALID
;
834 mbuf
->m_pkthdr
.vlan_tag
= vlan
;
844 if ((mbuf
->m_pkthdr
.csum_flags
& CSUM_VLAN_TAG_VALID
) == 0) {
845 return ENXIO
; // No vlan tag set
847 *vlan
= mbuf
->m_pkthdr
.vlan_tag
;
856 mbuf
->m_pkthdr
.csum_flags
&= ~CSUM_VLAN_TAG_VALID
;
857 mbuf
->m_pkthdr
.vlan_tag
= 0;
862 static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags
=
863 MBUF_CSUM_REQ_IP
| MBUF_CSUM_REQ_TCP
| MBUF_CSUM_REQ_UDP
|
864 MBUF_CSUM_PARTIAL
| MBUF_CSUM_REQ_TCPIPV6
| MBUF_CSUM_REQ_UDPIPV6
;
867 mbuf_set_csum_requested(
869 mbuf_csum_request_flags_t request
,
872 request
&= mbuf_valid_csum_request_flags
;
873 mbuf
->m_pkthdr
.csum_flags
=
874 (mbuf
->m_pkthdr
.csum_flags
& 0xffff0000) | request
;
875 mbuf
->m_pkthdr
.csum_data
= value
;
880 static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags
=
881 MBUF_TSO_IPV4
| MBUF_TSO_IPV6
;
884 mbuf_get_tso_requested(
886 mbuf_tso_request_flags_t
*request
,
889 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 ||
890 request
== NULL
|| value
== NULL
) {
894 *request
= mbuf
->m_pkthdr
.csum_flags
;
895 *request
&= mbuf_valid_tso_request_flags
;
896 if (*request
&& value
!= NULL
) {
897 *value
= mbuf
->m_pkthdr
.tso_segsz
;
904 mbuf_get_csum_requested(
906 mbuf_csum_request_flags_t
*request
,
909 *request
= mbuf
->m_pkthdr
.csum_flags
;
910 *request
&= mbuf_valid_csum_request_flags
;
912 *value
= mbuf
->m_pkthdr
.csum_data
;
919 mbuf_clear_csum_requested(
922 mbuf
->m_pkthdr
.csum_flags
&= 0xffff0000;
923 mbuf
->m_pkthdr
.csum_data
= 0;
928 static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags
=
929 MBUF_CSUM_DID_IP
| MBUF_CSUM_IP_GOOD
| MBUF_CSUM_DID_DATA
|
930 MBUF_CSUM_PSEUDO_HDR
| MBUF_CSUM_PARTIAL
;
933 mbuf_set_csum_performed(
935 mbuf_csum_performed_flags_t performed
,
938 performed
&= mbuf_valid_csum_performed_flags
;
939 mbuf
->m_pkthdr
.csum_flags
=
940 (mbuf
->m_pkthdr
.csum_flags
& 0xffff0000) | performed
;
941 mbuf
->m_pkthdr
.csum_data
= value
;
947 mbuf_get_csum_performed(
949 mbuf_csum_performed_flags_t
*performed
,
953 mbuf
->m_pkthdr
.csum_flags
& mbuf_valid_csum_performed_flags
;
954 *value
= mbuf
->m_pkthdr
.csum_data
;
960 mbuf_clear_csum_performed(
963 mbuf
->m_pkthdr
.csum_flags
&= 0xffff0000;
964 mbuf
->m_pkthdr
.csum_data
= 0;
970 mbuf_inet_cksum(mbuf_t mbuf
, int protocol
, u_int32_t offset
, u_int32_t length
,
973 if (mbuf
== NULL
|| length
== 0 || csum
== NULL
||
974 (u_int32_t
)mbuf
->m_pkthdr
.len
< (offset
+ length
)) {
978 *csum
= inet_cksum(mbuf
, protocol
, offset
, length
);
984 mbuf_inet6_cksum(mbuf_t mbuf
, int protocol
, u_int32_t offset
, u_int32_t length
,
987 if (mbuf
== NULL
|| length
== 0 || csum
== NULL
||
988 (u_int32_t
)mbuf
->m_pkthdr
.len
< (offset
+ length
)) {
992 *csum
= inet6_cksum(mbuf
, protocol
, offset
, length
);
997 mbuf_inet6_cksum(__unused mbuf_t mbuf
, __unused
int protocol
,
998 __unused u_int32_t offset
, __unused u_int32_t length
,
999 __unused u_int16_t
*csum
)
1001 panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
1006 inet6_cksum(__unused
struct mbuf
*m
, __unused
unsigned int nxt
,
1007 __unused
unsigned int off
, __unused
unsigned int len
)
1009 panic("inet6_cksum() doesn't exist on this platform\n");
1013 void nd6_lookup_ipv6(void);
1015 nd6_lookup_ipv6(void)
1017 panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
1021 in6addr_local(__unused
struct in6_addr
*a
)
1023 panic("in6addr_local() doesn't exist on this platform\n");
1027 void nd6_storelladdr(void);
1029 nd6_storelladdr(void)
1031 panic("nd6_storelladdr() doesn't exist on this platform\n");
1039 #define MTAG_FIRST_ID FIRST_KPI_STR_ID
1044 mbuf_tag_id_t
*out_id
)
1046 return net_str_id_find_internal(string
, out_id
, NSI_MBUF_TAG
, 1);
1053 mbuf_tag_type_t type
,
1059 u_int32_t mtag_id_first
, mtag_id_last
;
1061 if (data_p
!= NULL
) {
1065 /* Sanity check parameters */
1066 (void) net_str_id_first_last(&mtag_id_first
, &mtag_id_last
,
1068 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 ||
1069 id
< mtag_id_first
|| id
> mtag_id_last
|| length
< 1 ||
1070 (length
& 0xffff0000) != 0 || data_p
== NULL
) {
1074 /* Make sure this mtag hasn't already been allocated */
1075 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
1080 /* Allocate an mtag */
1081 tag
= m_tag_create(id
, type
, length
, how
, mbuf
);
1083 return how
== M_WAITOK
? ENOMEM
: EWOULDBLOCK
;
1086 /* Attach the mtag and set *data_p */
1087 m_tag_prepend(mbuf
, tag
);
1097 mbuf_tag_type_t type
,
1102 u_int32_t mtag_id_first
, mtag_id_last
;
1104 if (length
!= NULL
) {
1107 if (data_p
!= NULL
) {
1111 /* Sanity check parameters */
1112 (void) net_str_id_first_last(&mtag_id_first
, &mtag_id_last
,
1114 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 ||
1115 id
< mtag_id_first
|| id
> mtag_id_last
|| length
== NULL
||
1120 /* Locate an mtag */
1121 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
1126 /* Copy out the pointer to the data and the length value */
1127 *length
= tag
->m_tag_len
;
1137 mbuf_tag_type_t type
)
1140 u_int32_t mtag_id_first
, mtag_id_last
;
1142 /* Sanity check parameters */
1143 (void) net_str_id_first_last(&mtag_id_first
, &mtag_id_last
,
1145 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 ||
1146 id
< mtag_id_first
|| id
> mtag_id_last
) {
1150 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
1155 m_tag_delete(mbuf
, tag
);
1159 * Maximum length of driver auxiliary data; keep this small to
1160 * fit in a single mbuf to avoid wasting memory, rounded down to
1161 * the nearest 64-bit boundary. This takes into account mbuf
1162 * tag-related (m_taghdr + m_tag) as well m_drvaux_tag structs.
1164 #define MBUF_DRVAUX_MAXLEN \
1165 P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) - \
1166 M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
1169 mbuf_add_drvaux(mbuf_t mbuf
, mbuf_how_t how
, u_int32_t family
,
1170 u_int32_t subfamily
, size_t length
, void **data_p
)
1172 struct m_drvaux_tag
*p
;
1175 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_PKTHDR
) ||
1176 length
== 0 || length
> MBUF_DRVAUX_MAXLEN
) {
1180 if (data_p
!= NULL
) {
1184 /* Check if one is already associated */
1185 if ((tag
= m_tag_locate(mbuf
, KERNEL_MODULE_TAG_ID
,
1186 KERNEL_TAG_TYPE_DRVAUX
, NULL
)) != NULL
) {
1190 /* Tag is (m_drvaux_tag + module specific data) */
1191 if ((tag
= m_tag_create(KERNEL_MODULE_TAG_ID
, KERNEL_TAG_TYPE_DRVAUX
,
1192 sizeof(*p
) + length
, how
, mbuf
)) == NULL
) {
1193 return (how
== MBUF_WAITOK
) ? ENOMEM
: EWOULDBLOCK
;
1196 p
= (struct m_drvaux_tag
*)(tag
+ 1);
1197 p
->da_family
= family
;
1198 p
->da_subfamily
= subfamily
;
1199 p
->da_length
= length
;
1201 /* Associate the tag */
1202 m_tag_prepend(mbuf
, tag
);
1204 if (data_p
!= NULL
) {
1212 mbuf_find_drvaux(mbuf_t mbuf
, u_int32_t
*family_p
, u_int32_t
*subfamily_p
,
1213 u_int32_t
*length_p
, void **data_p
)
1215 struct m_drvaux_tag
*p
;
1218 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_PKTHDR
) || data_p
== NULL
) {
1224 if ((tag
= m_tag_locate(mbuf
, KERNEL_MODULE_TAG_ID
,
1225 KERNEL_TAG_TYPE_DRVAUX
, NULL
)) == NULL
) {
1229 /* Must be at least size of m_drvaux_tag */
1230 VERIFY(tag
->m_tag_len
>= sizeof(*p
));
1232 p
= (struct m_drvaux_tag
*)(tag
+ 1);
1233 VERIFY(p
->da_length
> 0 && p
->da_length
<= MBUF_DRVAUX_MAXLEN
);
1235 if (family_p
!= NULL
) {
1236 *family_p
= p
->da_family
;
1238 if (subfamily_p
!= NULL
) {
1239 *subfamily_p
= p
->da_subfamily
;
1241 if (length_p
!= NULL
) {
1242 *length_p
= p
->da_length
;
1251 mbuf_del_drvaux(mbuf_t mbuf
)
1255 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_PKTHDR
)) {
1259 if ((tag
= m_tag_locate(mbuf
, KERNEL_MODULE_TAG_ID
,
1260 KERNEL_TAG_TYPE_DRVAUX
, NULL
)) != NULL
) {
1261 m_tag_delete(mbuf
, tag
);
1267 mbuf_stats(struct mbuf_stat
*stats
)
1269 stats
->mbufs
= mbstat
.m_mbufs
;
1270 stats
->clusters
= mbstat
.m_clusters
;
1271 stats
->clfree
= mbstat
.m_clfree
;
1272 stats
->drops
= mbstat
.m_drops
;
1273 stats
->wait
= mbstat
.m_wait
;
1274 stats
->drain
= mbstat
.m_drain
;
1275 __builtin_memcpy(stats
->mtypes
, mbstat
.m_mtypes
, sizeof(stats
->mtypes
));
1276 stats
->mcfail
= mbstat
.m_mcfail
;
1277 stats
->mpfail
= mbstat
.m_mpfail
;
1278 stats
->msize
= mbstat
.m_msize
;
1279 stats
->mclbytes
= mbstat
.m_mclbytes
;
1280 stats
->minclsize
= mbstat
.m_minclsize
;
1281 stats
->mlen
= mbstat
.m_mlen
;
1282 stats
->mhlen
= mbstat
.m_mhlen
;
1283 stats
->bigclusters
= mbstat
.m_bigclusters
;
1284 stats
->bigclfree
= mbstat
.m_bigclfree
;
1285 stats
->bigmclbytes
= mbstat
.m_bigmclbytes
;
1289 mbuf_allocpacket(mbuf_how_t how
, size_t packetlen
, unsigned int *maxchunks
,
1294 unsigned int numpkts
= 1;
1295 unsigned int numchunks
= maxchunks
? *maxchunks
: 0;
1297 if (packetlen
== 0) {
1301 m
= m_allocpacket_internal(&numpkts
, packetlen
,
1302 maxchunks
? &numchunks
: NULL
, how
, 1, 0);
1304 if (maxchunks
&& *maxchunks
&& numchunks
> *maxchunks
) {
1311 *maxchunks
= numchunks
;
1321 mbuf_allocpacket_list(unsigned int numpkts
, mbuf_how_t how
, size_t packetlen
,
1322 unsigned int *maxchunks
, mbuf_t
*mbuf
)
1326 unsigned int numchunks
= maxchunks
? *maxchunks
: 0;
1332 if (packetlen
== 0) {
1336 m
= m_allocpacket_internal(&numpkts
, packetlen
,
1337 maxchunks
? &numchunks
: NULL
, how
, 1, 0);
1339 if (maxchunks
&& *maxchunks
&& numchunks
> *maxchunks
) {
1346 *maxchunks
= numchunks
;
1355 __private_extern__
size_t
1356 mbuf_pkt_list_len(mbuf_t m
)
1362 len
+= mbuf_pkthdr_len(n
);
1363 n
= mbuf_nextpkt(n
);
1368 __private_extern__
size_t
1369 mbuf_pkt_list_maxlen(mbuf_t m
)
1375 maxlen
+= mbuf_pkthdr_maxlen(n
);
1376 n
= mbuf_nextpkt(n
);
1382 * mbuf_copyback differs from m_copyback in a few ways:
1383 * 1) mbuf_copyback will allocate clusters for new mbufs we append
1384 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
1385 * 3) mbuf_copyback reports whether or not the operation succeeded
1386 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
1401 const char *cp
= data
;
1403 if (m
== NULL
|| len
== 0 || data
== NULL
) {
1407 while (off
> (mlen
= m
->m_len
)) {
1410 if (m
->m_next
== 0) {
1411 n
= m_getclr(how
, m
->m_type
);
1416 n
->m_len
= MIN(MLEN
, len
+ off
);
1423 mlen
= MIN(m
->m_len
- off
, len
);
1424 if (mlen
< len
&& m
->m_next
== NULL
&&
1425 mbuf_trailingspace(m
) > 0) {
1426 size_t grow
= MIN(mbuf_trailingspace(m
), len
- mlen
);
1430 bcopy(cp
, off
+ (char *)mbuf_data(m
), (unsigned)mlen
);
1439 if (m
->m_next
== 0) {
1440 n
= m_get(how
, m
->m_type
);
1445 if (len
> MINCLSIZE
) {
1447 * cluster allocation failure is okay,
1450 mbuf_mclget(how
, m
->m_type
, &n
);
1452 n
->m_len
= MIN(mbuf_maxlen(n
), len
);
1459 if ((m_start
->m_flags
& M_PKTHDR
) && (m_start
->m_pkthdr
.len
< totlen
)) {
1460 m_start
->m_pkthdr
.len
= totlen
;
1473 mbuf_get_mhlen(void)
1479 mbuf_get_minclsize(void)
1481 return MHLEN
+ MLEN
;
1485 mbuf_get_traffic_class_max_count(void)
1491 mbuf_get_traffic_class_index(mbuf_traffic_class_t tc
, u_int32_t
*index
)
1493 if (index
== NULL
|| (u_int32_t
)tc
>= MBUF_TC_MAX
) {
1497 *index
= MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc
)));
1501 mbuf_traffic_class_t
1502 mbuf_get_traffic_class(mbuf_t m
)
1504 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1508 return m_get_traffic_class(m
);
1512 mbuf_set_traffic_class(mbuf_t m
, mbuf_traffic_class_t tc
)
1514 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) ||
1515 ((u_int32_t
)tc
>= MBUF_TC_MAX
)) {
1519 return m_set_traffic_class(m
, tc
);
1523 mbuf_is_traffic_class_privileged(mbuf_t m
)
1525 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) ||
1526 !MBUF_VALID_SC(m
->m_pkthdr
.pkt_svc
)) {
1530 return (m
->m_pkthdr
.pkt_flags
& PKTF_PRIO_PRIVILEGED
) ? 1 : 0;
1534 mbuf_get_service_class_max_count(void)
1536 return MBUF_SC_MAX_CLASSES
;
1540 mbuf_get_service_class_index(mbuf_svc_class_t sc
, u_int32_t
*index
)
1542 if (index
== NULL
|| !MBUF_VALID_SC(sc
)) {
1546 *index
= MBUF_SCIDX(sc
);
1551 mbuf_get_service_class(mbuf_t m
)
1553 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1557 return m_get_service_class(m
);
1561 mbuf_set_service_class(mbuf_t m
, mbuf_svc_class_t sc
)
1563 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1567 return m_set_service_class(m
, sc
);
1571 mbuf_pkthdr_aux_flags(mbuf_t m
, mbuf_pkthdr_aux_flags_t
*flagsp
)
1575 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || flagsp
== NULL
) {
1580 flags
= m
->m_pkthdr
.pkt_flags
;
1581 if ((flags
& (PKTF_INET_RESOLVE
| PKTF_RESOLVE_RTR
)) ==
1582 (PKTF_INET_RESOLVE
| PKTF_RESOLVE_RTR
)) {
1583 *flagsp
|= MBUF_PKTAUXF_INET_RESOLVE_RTR
;
1585 if ((flags
& (PKTF_INET6_RESOLVE
| PKTF_RESOLVE_RTR
)) ==
1586 (PKTF_INET6_RESOLVE
| PKTF_RESOLVE_RTR
)) {
1587 *flagsp
|= MBUF_PKTAUXF_INET6_RESOLVE_RTR
;
1590 /* These 2 flags are mutually exclusive */
1592 (MBUF_PKTAUXF_INET_RESOLVE_RTR
| MBUF_PKTAUXF_INET6_RESOLVE_RTR
)) !=
1593 (MBUF_PKTAUXF_INET_RESOLVE_RTR
| MBUF_PKTAUXF_INET6_RESOLVE_RTR
));
1599 mbuf_get_driver_scratch(mbuf_t m
, u_int8_t
**area
, size_t *area_len
)
1601 if (m
== NULL
|| area
== NULL
|| area_len
== NULL
||
1602 !(m
->m_flags
& M_PKTHDR
)) {
1606 *area_len
= m_scratch_get(m
, area
);
1611 mbuf_get_unsent_data_bytes(const mbuf_t m
, u_int32_t
*unsent_data
)
1613 if (m
== NULL
|| unsent_data
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1617 if (!(m
->m_pkthdr
.pkt_flags
& PKTF_VALID_UNSENT_DATA
)) {
1621 *unsent_data
= m
->m_pkthdr
.bufstatus_if
+
1622 m
->m_pkthdr
.bufstatus_sndbuf
;
1627 mbuf_get_buffer_status(const mbuf_t m
, mbuf_buffer_status_t
*buf_status
)
1629 if (m
== NULL
|| buf_status
== NULL
|| !(m
->m_flags
& M_PKTHDR
) ||
1630 !(m
->m_pkthdr
.pkt_flags
& PKTF_VALID_UNSENT_DATA
)) {
1634 buf_status
->buf_interface
= m
->m_pkthdr
.bufstatus_if
;
1635 buf_status
->buf_sndbuf
= m
->m_pkthdr
.bufstatus_sndbuf
;
1640 mbuf_pkt_new_flow(const mbuf_t m
, u_int32_t
*retval
)
1642 if (m
== NULL
|| retval
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1645 if (m
->m_pkthdr
.pkt_flags
& PKTF_NEW_FLOW
) {
1654 mbuf_last_pkt(const mbuf_t m
, u_int32_t
*retval
)
1656 if (m
== NULL
|| retval
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1659 if (m
->m_pkthdr
.pkt_flags
& PKTF_LAST_PKT
) {
1668 mbuf_get_timestamp(mbuf_t m
, u_int64_t
*ts
, boolean_t
*valid
)
1670 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || ts
== NULL
) {
1674 if ((m
->m_pkthdr
.pkt_flags
& PKTF_TS_VALID
) == 0) {
1675 if (valid
!= NULL
) {
1680 if (valid
!= NULL
) {
1683 *ts
= m
->m_pkthdr
.pkt_timestamp
;
1689 mbuf_set_timestamp(mbuf_t m
, u_int64_t ts
, boolean_t valid
)
1691 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1695 if (valid
== FALSE
) {
1696 m
->m_pkthdr
.pkt_flags
&= ~PKTF_TS_VALID
;
1697 m
->m_pkthdr
.pkt_timestamp
= 0;
1699 m
->m_pkthdr
.pkt_flags
|= PKTF_TS_VALID
;
1700 m
->m_pkthdr
.pkt_timestamp
= ts
;
1706 mbuf_get_status(mbuf_t m
, kern_return_t
*status
)
1708 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || status
== NULL
) {
1712 if ((m
->m_pkthdr
.pkt_flags
& PKTF_DRIVER_MTAG
) == 0) {
1715 *status
= m
->m_pkthdr
.drv_tx_status
;
1721 driver_mtag_init(mbuf_t m
)
1723 if ((m
->m_pkthdr
.pkt_flags
& PKTF_DRIVER_MTAG
) == 0) {
1724 m
->m_pkthdr
.pkt_flags
|= PKTF_DRIVER_MTAG
;
1725 bzero(&m
->m_pkthdr
.driver_mtag
,
1726 sizeof(m
->m_pkthdr
.driver_mtag
));
1731 mbuf_set_status(mbuf_t m
, kern_return_t status
)
1733 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1737 driver_mtag_init(m
);
1739 m
->m_pkthdr
.drv_tx_status
= status
;
1745 mbuf_get_flowid(mbuf_t m
, u_int16_t
*flowid
)
1747 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || flowid
== NULL
) {
1751 if ((m
->m_pkthdr
.pkt_flags
& PKTF_DRIVER_MTAG
) == 0) {
1754 *flowid
= m
->m_pkthdr
.drv_flowid
;
1760 mbuf_set_flowid(mbuf_t m
, u_int16_t flowid
)
1762 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1766 driver_mtag_init(m
);
1768 m
->m_pkthdr
.drv_flowid
= flowid
;
1774 mbuf_get_tx_compl_data(mbuf_t m
, uintptr_t *arg
, uintptr_t *data
)
1776 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || arg
== NULL
||
1781 if ((m
->m_pkthdr
.pkt_flags
& PKTF_DRIVER_MTAG
) == 0) {
1785 *arg
= m
->m_pkthdr
.drv_tx_compl_arg
;
1786 *data
= m
->m_pkthdr
.drv_tx_compl_data
;
1792 mbuf_set_tx_compl_data(mbuf_t m
, uintptr_t arg
, uintptr_t data
)
1794 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1798 driver_mtag_init(m
);
1800 m
->m_pkthdr
.drv_tx_compl_arg
= arg
;
1801 m
->m_pkthdr
.drv_tx_compl_data
= data
;
1807 get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback
)
1811 for (i
= 0; i
< MAX_MBUF_TX_COMPL_FUNC
; i
++) {
1812 if (mbuf_tx_compl_table
[i
] == callback
) {
1820 get_tx_compl_callback_index(mbuf_tx_compl_func callback
)
1824 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock
);
1826 i
= get_tx_compl_callback_index_locked(callback
);
1828 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock
);
1834 m_get_tx_compl_callback(u_int32_t idx
)
1836 mbuf_tx_compl_func cb
;
1838 if (idx
>= MAX_MBUF_TX_COMPL_FUNC
) {
1842 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock
);
1843 cb
= mbuf_tx_compl_table
[idx
];
1844 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock
);
1849 mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback
)
1854 if (callback
== NULL
) {
1858 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock
);
1860 i
= get_tx_compl_callback_index_locked(callback
);
1866 /* assume the worst */
1868 for (i
= 0; i
< MAX_MBUF_TX_COMPL_FUNC
; i
++) {
1869 if (mbuf_tx_compl_table
[i
] == NULL
) {
1870 mbuf_tx_compl_table
[i
] = callback
;
1876 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock
);
1882 mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback
)
1887 if (callback
== NULL
) {
1891 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock
);
1893 /* assume the worst */
1895 for (i
= 0; i
< MAX_MBUF_TX_COMPL_FUNC
; i
++) {
1896 if (mbuf_tx_compl_table
[i
] == callback
) {
1897 mbuf_tx_compl_table
[i
] = NULL
;
1903 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock
);
1909 mbuf_get_timestamp_requested(mbuf_t m
, boolean_t
*requested
)
1911 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
)) {
1915 if ((m
->m_pkthdr
.pkt_flags
& PKTF_TX_COMPL_TS_REQ
) == 0) {
1924 mbuf_set_timestamp_requested(mbuf_t m
, uintptr_t *pktid
,
1925 mbuf_tx_compl_func callback
)
1929 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || callback
== NULL
||
1934 i
= get_tx_compl_callback_index(callback
);
1935 if (i
== UINT32_MAX
) {
1939 #if (DEBUG || DEVELOPMENT)
1940 VERIFY(i
< sizeof(m
->m_pkthdr
.pkt_compl_callbacks
));
1941 #endif /* (DEBUG || DEVELOPMENT) */
1943 if ((m
->m_pkthdr
.pkt_flags
& PKTF_TX_COMPL_TS_REQ
) == 0) {
1944 m
->m_pkthdr
.pkt_compl_callbacks
= 0;
1945 m
->m_pkthdr
.pkt_flags
|= PKTF_TX_COMPL_TS_REQ
;
1946 m
->m_pkthdr
.pkt_compl_context
=
1947 atomic_add_32_ov(&mbuf_tx_compl_index
, 1);
1949 #if (DEBUG || DEVELOPMENT)
1950 if (mbuf_tx_compl_debug
!= 0) {
1951 OSIncrementAtomic64(&mbuf_tx_compl_outstanding
);
1953 #endif /* (DEBUG || DEVELOPMENT) */
1955 m
->m_pkthdr
.pkt_compl_callbacks
|= (1 << i
);
1956 *pktid
= m
->m_pkthdr
.pkt_compl_context
;
1962 m_do_tx_compl_callback(struct mbuf
*m
, struct ifnet
*ifp
)
1970 if ((m
->m_pkthdr
.pkt_flags
& PKTF_TX_COMPL_TS_REQ
) == 0) {
1974 #if (DEBUG || DEVELOPMENT)
1975 if (mbuf_tx_compl_debug
!= 0 && ifp
!= NULL
&&
1976 (ifp
->if_xflags
& IFXF_TIMESTAMP_ENABLED
) != 0 &&
1977 (m
->m_pkthdr
.pkt_flags
& PKTF_TS_VALID
) == 0) {
1978 struct timespec now
;
1981 net_timernsec(&now
, &m
->m_pkthdr
.pkt_timestamp
);
1983 #endif /* (DEBUG || DEVELOPMENT) */
1985 for (i
= 0; i
< MAX_MBUF_TX_COMPL_FUNC
; i
++) {
1986 mbuf_tx_compl_func callback
;
1988 if ((m
->m_pkthdr
.pkt_compl_callbacks
& (1 << i
)) == 0) {
1992 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock
);
1993 callback
= mbuf_tx_compl_table
[i
];
1994 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock
);
1996 if (callback
!= NULL
) {
1997 callback(m
->m_pkthdr
.pkt_compl_context
,
1999 (m
->m_pkthdr
.pkt_flags
& PKTF_TS_VALID
) ?
2000 m
->m_pkthdr
.pkt_timestamp
: 0,
2001 m
->m_pkthdr
.drv_tx_compl_arg
,
2002 m
->m_pkthdr
.drv_tx_compl_data
,
2003 m
->m_pkthdr
.drv_tx_status
);
2006 m
->m_pkthdr
.pkt_compl_callbacks
= 0;
2008 #if (DEBUG || DEVELOPMENT)
2009 if (mbuf_tx_compl_debug
!= 0) {
2010 OSDecrementAtomic64(&mbuf_tx_compl_outstanding
);
2012 atomic_add_64(&mbuf_tx_compl_aborted
, 1);
2015 #endif /* (DEBUG || DEVELOPMENT) */