/*
 * Copyright (c) 2004-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/socket.h>
#include <kern/debug.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>

#include "net/net_str_id.h"
/* mbuf flags visible to KPI clients; do not add private flags here */
static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
    MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
    MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);

/* Unalterable mbuf flags */
static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);

#define MAX_MBUF_TX_COMPL_FUNC 32

mbuf_tx_compl_func
    mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC];
extern lck_rw_t *mbuf_tx_compl_tbl_lock;
u_int32_t mbuf_tx_compl_index = 0;
#if (DEVELOPMENT || DEBUG)
int mbuf_tx_compl_debug = 0;
SInt64 mbuf_tx_compl_outstanding __attribute__((aligned(8))) = 0;
u_int64_t mbuf_tx_compl_aborted __attribute__((aligned(8))) = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_NODE(_kern_ipc, OID_AUTO, mbtxcf,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mbuf_tx_compl_debug, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, index,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_index, 0, "");
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, outstanding,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_outstanding, "");
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, aborted,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_aborted, "");
#endif /* (DEVELOPMENT || DEBUG) */
void *
mbuf_data(mbuf_t mbuf)
{
	return mbuf->m_data;
}

void *
mbuf_datastart(mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_buf;
	}
	if (mbuf->m_flags & M_PKTHDR) {
		return mbuf->m_pktdat;
	}
	return mbuf->m_dat;
}

errno_t
mbuf_setdata(mbuf_t mbuf, void *data, size_t len)
{
	size_t start = (size_t)((char *)mbuf_datastart(mbuf));
	size_t maxlen = mbuf_maxlen(mbuf);

	/* The new data range must lie entirely within this mbuf's buffer */
	if ((size_t)data < start || ((size_t)data) + len > start + maxlen) {
		return EINVAL;
	}
	mbuf->m_data = data;
	mbuf->m_len = len;

	return 0;
}

errno_t
mbuf_align_32(mbuf_t mbuf, size_t len)
{
	/* Do not move data in an external cluster that is shared */
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) {
		return ENOTSUP;
	}
	mbuf->m_data = mbuf_datastart(mbuf);
	mbuf->m_data +=
	    ((mbuf_trailingspace(mbuf) - len) & ~(sizeof(u_int32_t) - 1));

	return 0;
}

/*
 * This function is used to provide mcl_to_paddr via symbol indirection;
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
addr64_t
mbuf_data_to_physical(void *ptr)
{
	return (addr64_t)mcl_to_paddr(ptr);
}

errno_t
mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_get(how, type);

	return *mbuf == NULL ? ENOMEM : 0;
}

errno_t
mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_gethdr(how, type);

	return *mbuf == NULL ? ENOMEM : 0;
}
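/*
 * Usage sketch (illustrative, not part of this file): callers are
 * expected to test the returned errno_t rather than the pointer, and
 * to release the mbuf with mbuf_freem() when done.
 *
 *	mbuf_t m = NULL;
 *	errno_t err = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &m);
 *	if (err == 0) {
 *		... build the packet in m ...
 *		mbuf_freem(m);
 *	}
 */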
errno_t
mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t),
    size_t extsize, caddr_t extarg)
{
	if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) {
		return EINVAL;
	}

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, extsize, extarg, how, 0)) == NULL) {
		return ENOMEM;
	}

	return 0;
}

errno_t
mbuf_ring_cluster_alloc(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    void (*extfree)(caddr_t, u_int, caddr_t), size_t *size)
{
	caddr_t extbuf = NULL;
	errno_t err;

	if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) {
		return EINVAL;
	}

	if ((err = mbuf_alloccluster(how, size, &extbuf)) != 0) {
		return err;
	}

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, *size, NULL, how, 1)) == NULL) {
		mbuf_freecluster(extbuf, *size);
		return ENOMEM;
	}

	return 0;
}

int
mbuf_ring_cluster_is_active(mbuf_t mbuf)
{
	return m_ext_paired_is_active(mbuf);
}

errno_t
mbuf_ring_cluster_activate(mbuf_t mbuf)
{
	if (mbuf_ring_cluster_is_active(mbuf)) {
		return EBUSY;
	}

	m_ext_paired_activate(mbuf);
	return 0;
}

errno_t
mbuf_cluster_set_prop(mbuf_t mbuf, u_int32_t oldprop, u_int32_t newprop)
{
	if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) {
		return EINVAL;
	}

	return m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY;
}

errno_t
mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop)
{
	if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) {
		return EINVAL;
	}

	*prop = m_ext_get_prop(mbuf);
	return 0;
}
errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL) {
		return EINVAL;
	}

	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > MBIGCLBYTES && njcl == 0) {
		return ENOTSUP;
	}

	/* Round *size up to the pool that actually satisfied the request */
	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL) {
		*size = MCLBYTES;
	} else if (*size > MCLBYTES && *size <= MBIGCLBYTES &&
	    (*addr = m_bigalloc(how)) != NULL) {
		*size = MBIGCLBYTES;
	} else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL) {
		*size = M16KCLBYTES;
	} else {
		*size = 0;
	}

	if (*addr == NULL) {
		return ENOMEM;
	}

	return 0;
}

void
mbuf_freecluster(caddr_t addr, size_t size)
{
	if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) {
		panic("%s: invalid size (%ld) for cluster %p", __func__,
		    size, (void *)addr);
	}

	if (size == MCLBYTES) {
		m_mclfree(addr);
	} else if (size == MBIGCLBYTES) {
		m_bigfree(addr, MBIGCLBYTES, NULL);
	} else if (njcl > 0) {
		m_16kfree(addr, M16KCLBYTES, NULL);
	} else {
		panic("%s: freeing jumbo cluster to an empty pool", __func__);
	}
}
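/*
 * Usage sketch (illustrative): mbuf_alloccluster() rounds *size up to
 * the pool it actually used (MCLBYTES, MBIGCLBYTES or M16KCLBYTES), and
 * that rounded size must be passed back to mbuf_freecluster().
 *
 *	size_t size = 2048;
 *	caddr_t addr = NULL;
 *	if (mbuf_alloccluster(MBUF_WAITOK, &size, &addr) == 0) {
 *		... use size bytes at addr ...
 *		mbuf_freecluster(addr, size);
 *	}
 */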
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL) {
			return ENOMEM;
		}
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == MBIGCLBYTES) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
out:
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}

errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error != 0) {
			return error;
		}
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
	return error;
}

errno_t
mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;

	*mbuf = m_getpacket_how(how);

	if (*mbuf == NULL) {
		if (how == MBUF_WAITOK) {
			error = ENOMEM;
		} else {
			error = EWOULDBLOCK;
		}
	}

	return error;
}
/*
 * This function is used to provide m_free via symbol indirection; please
 * avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
mbuf_t
mbuf_free(mbuf_t mbuf)
{
	return m_free(mbuf);
}

/*
 * This function is used to provide m_freem via symbol indirection; please
 * avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
void
mbuf_freem(mbuf_t mbuf)
{
	m_freem(mbuf);
}

int
mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}

size_t
mbuf_leadingspace(const mbuf_t mbuf)
{
	return M_LEADINGSPACE(mbuf);
}

/*
 * This function is used to provide m_trailingspace via symbol indirection;
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
size_t
mbuf_trailingspace(const mbuf_t mbuf)
{
	return M_TRAILINGSPACE(mbuf);
}
errno_t
mbuf_copym(const mbuf_t src, size_t offset, size_t len,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_copym(src, offset, len, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}

errno_t
mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_dup(src, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}

errno_t
mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
	/* Must set *orig to NULL in failure case */
	*orig = m_prepend_2(*orig, len, how, 0);

	return *orig == NULL ? ENOMEM : 0;
}

errno_t
mbuf_split(mbuf_t src, size_t offset,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_split(src, offset, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}

errno_t
mbuf_pullup(mbuf_t *mbuf, size_t len)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_pullup(*mbuf, len);

	return *mbuf == NULL ? ENOMEM : 0;
}

errno_t
mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;

	*location = m_pulldown(src, *offset, len, &new_offset);
	*offset = new_offset;

	return *location == NULL ? ENOMEM : 0;
}
/*
 * This function is used to provide m_adj via symbol indirection; please
 * avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
void
mbuf_adj(mbuf_t mbuf, int len)
{
	m_adj(mbuf, len);
}

errno_t
mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		int used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
		    m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m)) {
			return EINVAL;
		}
	} else if (-amount > m->m_len) {
		return EINVAL;
	}

	m->m_len += amount;
	return 0;
}

mbuf_t
mbuf_concatenate(mbuf_t dst, mbuf_t src)
{
	if (dst == NULL) {
		return NULL;
	}

	m_cat(dst, src);

	/* return dst as is in the current implementation */
	return dst;
}

errno_t
mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data)
{
	/* Copied m_copydata, added error handling (don't just panic) */
	size_t count;
	mbuf_t m = m0;

	if (off >= INT_MAX || len >= INT_MAX) {
		return EINVAL;
	}

	while (off > 0) {
		if (m == NULL) {
			return EINVAL;
		}
		if (off < (size_t)m->m_len) {
			break;
		}
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == NULL) {
			return EINVAL;
		}
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		len -= count;
		out_data = ((char *)out_data) + count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}
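/*
 * Usage sketch (illustrative; assumes <netinet/ip.h> is available to the
 * caller): copying a header out of a chain without assuming it is
 * contiguous in the first mbuf.
 *
 *	struct ip iphdr;
 *	if (mbuf_copydata(m, 0, sizeof (iphdr), &iphdr) != 0) {
 *		... chain is shorter than sizeof (iphdr) ...
 *	}
 */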
int
mbuf_mclhasreference(mbuf_t mbuf)
{
	if ((mbuf->m_flags & M_EXT)) {
		return m_mclhasreference(mbuf);
	} else {
		return 0;
	}
}

mbuf_t
mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}

errno_t
mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
	if (next && ((next)->m_nextpkt != NULL ||
	    (next)->m_type == MT_FREE)) {
		return EINVAL;
	}
	mbuf->m_next = next;

	return 0;
}

mbuf_t
mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}

void
mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}

size_t
mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}

void
mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = len;
}

size_t
mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_size;
	}
	return &mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf));
}

mbuf_type_t
mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}

errno_t
mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) {
		return EINVAL;
	}

	m_mchtype(mbuf, new_type);

	return 0;
}

mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}
errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	errno_t ret = 0;
	mbuf_flags_t oflags = mbuf->m_flags;

	/*
	 * 1. Return an error if public but un-alterable flags are changed
	 *    in the passed flags argument.
	 * 2. Return an error if bits other than public flags are set in the
	 *    passed flags argument.
	 *    Please note that private flag bits must be passed as reset by
	 *    kexts, as they must use the mbuf_flags KPI to get the current
	 *    set of mbuf flags, and the mbuf_flags KPI does not expose
	 *    private flags.
	 */
	if ((flags ^ oflags) & mbuf_cflags_mask) {
		ret = EINVAL;
	} else if (flags & ~mbuf_flags_mask) {
		ret = EINVAL;
	} else {
		mbuf->m_flags = flags | (mbuf->m_flags & ~mbuf_flags_mask);
		/*
		 * If the M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of the bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;      /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}

errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	errno_t ret = 0;

	if (mask & (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
		/*
		 * If the M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of the bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;      /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
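/*
 * Usage sketch (illustrative): because mbuf_flags() only returns bits in
 * mbuf_flags_mask, a read-modify-write through the KPI never leaks
 * private flag bits back into mbuf_setflags().
 *
 *	mbuf_flags_t f = mbuf_flags(m) | MBUF_BCAST;
 *	errno_t err = mbuf_setflags(m, f);
 *
 * or, touching a single bit without reading first:
 *
 *	err = mbuf_setflags_mask(m, MBUF_BCAST, MBUF_BCAST);
 */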
errno_t
mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
{
	if (((src)->m_flags & M_PKTHDR) == 0) {
		return EINVAL;
	}

	m_copy_pkthdr(dest, src);

	return 0;
}

size_t
mbuf_pkthdr_len(const mbuf_t mbuf)
{
	if (((mbuf)->m_flags & M_PKTHDR) == 0) {
		return 0;
	}
	/*
	 * While we assert on development and debug builds, also
	 * make sure we never return a negative length on release
	 * builds.
	 */
	ASSERT(mbuf->m_pkthdr.len >= 0);
	if (mbuf->m_pkthdr.len < 0) {
		return 0;
	}
	return mbuf->m_pkthdr.len;
}

__private_extern__ size_t
mbuf_pkthdr_maxlen(mbuf_t m)
{
	size_t maxlen = 0;
	mbuf_t n = m;

	while (n) {
		maxlen += mbuf_maxlen(n);
		n = mbuf_next(n);
	}
	return maxlen;
}

errno_t
mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	if (len > INT32_MAX) {
		return EINVAL;
	}

	mbuf->m_pkthdr.len = len;

	return 0;
}

void
mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}

ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	/*
	 * If we reference count ifnets, we should take a reference here
	 * before returning.
	 */
	return mbuf->m_pkthdr.rcvif;
}

errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet;
	return 0;
}

void *
mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.pkt_hdr;
}

void
mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.pkt_hdr = (void *)header;
}

void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
void
mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
	/* Generate the packet in software, client needs it */
	switch (pf) {
	case PF_INET:
		(void) in_finalize_cksum(m, o, m->m_pkthdr.csum_flags);
		break;

	case PF_INET6:
#if INET6
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; indicate that the callee
		 * should skip such a case by setting optlen to -1.
		 */
		(void) in6_finalize_cksum(m, o, -1, -1, m->m_pkthdr.csum_flags);
#endif /* INET6 */
		break;

	default:
		break;
	}
}
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}

errno_t
mbuf_get_vlan_tag(
	mbuf_t mbuf,
	u_int16_t *vlan)
{
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		return ENXIO; // No vlan tag set
	}
	*vlan = mbuf->m_pkthdr.vlan_tag;

	return 0;
}

errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
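/*
 * Usage sketch (illustrative): ENXIO from mbuf_get_vlan_tag()
 * distinguishes "no tag present" from an invalid argument.
 *
 *	u_int16_t vlan;
 *	errno_t err = mbuf_get_vlan_tag(m, &vlan);
 *	if (err == 0) {
 *		... packet carries 802.1Q tag vlan ...
 *	} else if (err == ENXIO) {
 *		... untagged packet ...
 *	}
 */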
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
    MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
    MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;

errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}

static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
    MBUF_TSO_IPV4 | MBUF_TSO_IPV6;

errno_t
mbuf_get_tso_requested(
	mbuf_t mbuf,
	mbuf_tso_request_flags_t *request,
	u_int32_t *value)
{
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    request == NULL || value == NULL) {
		return EINVAL;
	}

	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_tso_request_flags;
	if (*request && value != NULL) {
		*value = mbuf->m_pkthdr.tso_segsz;
	}

	return 0;
}

errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}

errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}

static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
    MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;

errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}

errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed =
	    mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}

errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
errno_t
mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
		return EINVAL;
	}

	*csum = inet_cksum(mbuf, protocol, offset, length);
	return 0;
}
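/*
 * Usage sketch (illustrative; hlen stands for the caller's IP header
 * length): checksumming a TCP segment that starts hlen bytes into the
 * packet.
 *
 *	u_int16_t csum;
 *	errno_t err = mbuf_inet_cksum(m, IPPROTO_TCP, hlen,
 *	    (u_int32_t)(mbuf_pkthdr_len(m) - hlen), &csum);
 */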
#if INET6
errno_t
mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
		return EINVAL;
	}

	*csum = inet6_cksum(mbuf, protocol, offset, length);
	return 0;
}
#else /* INET6 */
errno_t
mbuf_inet6_cksum(__unused mbuf_t mbuf, __unused int protocol,
    __unused u_int32_t offset, __unused u_int32_t length,
    __unused u_int16_t *csum)
{
	panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
	return 0;
}

u_int16_t
inet6_cksum(__unused struct mbuf *m, __unused unsigned int nxt,
    __unused unsigned int off, __unused unsigned int len)
{
	panic("inet6_cksum() doesn't exist on this platform\n");
	return 0;
}

void nd6_lookup_ipv6(void);
void
nd6_lookup_ipv6(void)
{
	panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
}

int
in6addr_local(__unused struct in6_addr *a)
{
	panic("in6addr_local() doesn't exist on this platform\n");
	return 0;
}

void nd6_storelladdr(void);
void
nd6_storelladdr(void)
{
	panic("nd6_storelladdr() doesn't exist on this platform\n");
}
#endif /* INET6 */
#define MTAG_FIRST_ID FIRST_KPI_STR_ID

errno_t
mbuf_tag_id_find(
	const char *string,
	mbuf_tag_id_t *out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}

errno_t
mbuf_tag_allocate(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t length,
	mbuf_how_t how,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length < 1 ||
	    (length & 0xffff0000) != 0 || data_p == NULL) {
		return EINVAL;
	}

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag != NULL) {
		return EEXIST;
	}

	/* Allocate an mtag */
	tag = m_tag_create(id, type, length, how, mbuf);
	if (tag == NULL) {
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
	}

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
	*data_p = tag + 1;

	return 0;
}

errno_t
mbuf_tag_find(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t *length,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (length != NULL) {
		*length = 0;
	}
	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length == NULL ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}

void
mbuf_tag_free(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last) {
		return;
	}

	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return;
	}

	m_tag_delete(mbuf, tag);
}
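/*
 * Usage sketch (illustrative; the tag string and payload are made up):
 * a kext attaches per-packet state by name, then finds and frees it
 * again later.
 *
 *	mbuf_tag_id_t id;
 *	u_int32_t *state;
 *	if (mbuf_tag_id_find("com.example.filter", &id) == 0 &&
 *	    mbuf_tag_allocate(m, id, 0, sizeof (*state), MBUF_WAITOK,
 *	    (void **)&state) == 0) {
 *		*state = 1;
 *	}
 *	...
 *	size_t len;
 *	if (mbuf_tag_find(m, id, 0, &len, (void **)&state) == 0) {
 *		... len == sizeof (*state) ...
 *		mbuf_tag_free(m, id, 0);
 *	}
 */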
/*
 * Maximum length of driver auxiliary data; keep this small to
 * fit in a single mbuf to avoid wasting memory, rounded down to
 * the nearest 64-bit boundary. This takes into account mbuf
 * tag-related (m_taghdr + m_tag) as well as m_drvaux_tag structs.
 */
#define MBUF_DRVAUX_MAXLEN                                              \
	P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) -                   \
	M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))

errno_t
mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
    u_int32_t subfamily, size_t length, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
	    length == 0 || length > MBUF_DRVAUX_MAXLEN) {
		return EINVAL;
	}

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Check if one is already associated */
	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
		return EEXIST;
	}

	/* Tag is (m_drvaux_tag + module specific data) */
	if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
	    sizeof(*p) + length, how, mbuf)) == NULL) {
		return (how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK;
	}

	p = (struct m_drvaux_tag *)(tag + 1);
	p->da_family = family;
	p->da_subfamily = subfamily;
	p->da_length = length;

	/* Associate the tag */
	m_tag_prepend(mbuf, tag);

	if (data_p != NULL) {
		*data_p = (p + 1);
	}

	return 0;
}

errno_t
mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
    u_int32_t *length_p, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) {
		return EINVAL;
	}

	*data_p = NULL;

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL) {
		return ENOENT;
	}

	/* Must be at least size of m_drvaux_tag */
	VERIFY(tag->m_tag_len >= sizeof(*p));

	p = (struct m_drvaux_tag *)(tag + 1);
	VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);

	if (family_p != NULL) {
		*family_p = p->da_family;
	}
	if (subfamily_p != NULL) {
		*subfamily_p = p->da_subfamily;
	}
	if (length_p != NULL) {
		*length_p = p->da_length;
	}

	*data_p = (p + 1);

	return 0;
}

void
mbuf_del_drvaux(mbuf_t mbuf)
{
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) {
		return;
	}

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
		m_tag_delete(mbuf, tag);
	}
}
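/*
 * Usage sketch (illustrative; the family/subfamily values are made up):
 * only one drvaux area may be attached per packet (EEXIST otherwise),
 * and its length is capped at MBUF_DRVAUX_MAXLEN.
 *
 *	void *aux;
 *	if (mbuf_add_drvaux(m, MBUF_WAITOK, 1, 1, 16, &aux) == 0) {
 *		... fill in 16 bytes at aux ...
 *		mbuf_del_drvaux(m);
 *	}
 */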
void
mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}
errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks,
    mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == NULL) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks) {
			error = ENOBUFS;
		} else {
			error = ENOMEM;
		}
	} else {
		if (maxchunks) {
			*maxchunks = numchunks;
		}
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}

errno_t
mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen,
    unsigned int *maxchunks, mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (numpkts == 0) {
		error = EINVAL;
		goto out;
	}
	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == NULL) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks) {
			error = ENOBUFS;
		} else {
			error = ENOMEM;
		}
	} else {
		if (maxchunks) {
			*maxchunks = numchunks;
		}
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}

__private_extern__ size_t
mbuf_pkt_list_len(mbuf_t m)
{
	size_t len = 0;
	mbuf_t n = m;

	while (n) {
		len += mbuf_pkthdr_len(n);
		n = mbuf_nextpkt(n);
	}
	return len;
}

__private_extern__ size_t
mbuf_pkt_list_maxlen(mbuf_t m)
{
	size_t maxlen = 0;
	mbuf_t n = m;

	while (n) {
		maxlen += mbuf_pkthdr_maxlen(n);
		n = mbuf_nextpkt(n);
	}
	return maxlen;
}
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 */
errno_t
mbuf_copyback(
	mbuf_t m,
	size_t off,
	size_t len,
	const void *data,
	mbuf_how_t how)
{
	size_t mlen;
	mbuf_t m_start = m;
	mbuf_t n;
	int totlen = 0;
	errno_t result = 0;
	const char *cp = data;

	if (m == NULL || len == 0 || data == NULL) {
		return EINVAL;
	}

	/* Skip to the offset, extending the chain with zeroed mbufs if short */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL &&
		    mbuf_trailingspace(m) > 0) {
			size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char *)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0) {
			break;
		}
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/*
				 * cluster allocation failure is okay,
				 * we can grow the chain
				 */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) {
		m_start->m_pkthdr.len = totlen;
	}

	return result;
}
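/*
 * Usage sketch (illustrative): unlike m_copyback(), a failed allocation
 * is reported instead of silently truncating the write; here the payload
 * is appended at the current end of the packet.
 *
 *	static const char payload[] = "ping";
 *	errno_t err = mbuf_copyback(m, mbuf_pkthdr_len(m),
 *	    sizeof (payload) - 1, payload, MBUF_WAITOK);
 */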
u_int32_t
mbuf_get_mhlen(void)
{
	return _MHLEN;
}

u_int32_t
mbuf_get_minclsize(void)
{
	return MHLEN + MLEN;
}

u_int32_t
mbuf_get_traffic_class_max_count(void)
{
	return MBUF_TC_MAX;
}

errno_t
mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
{
	if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) {
		return EINVAL;
	}

	*index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
	return 0;
}

mbuf_traffic_class_t
mbuf_get_traffic_class(mbuf_t m)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return MBUF_TC_BE;
	}

	return m_get_traffic_class(m);
}

errno_t
mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
	    ((u_int32_t)tc >= MBUF_TC_MAX)) {
		return EINVAL;
	}

	return m_set_traffic_class(m, tc);
}

int
mbuf_is_traffic_class_privileged(mbuf_t m)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
	    !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
		return 0;
	}

	return (m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0;
}

u_int32_t
mbuf_get_service_class_max_count(void)
{
	return MBUF_SC_MAX_CLASSES;
}

errno_t
mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
{
	if (index == NULL || !MBUF_VALID_SC(sc)) {
		return EINVAL;
	}

	*index = MBUF_SCIDX(sc);
	return 0;
}

mbuf_svc_class_t
mbuf_get_service_class(mbuf_t m)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return MBUF_SC_BE;
	}

	return m_get_service_class(m);
}

errno_t
mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	return m_set_service_class(m, sc);
}
errno_t
mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
{
	u_int32_t flags;

	if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) {
		return EINVAL;
	}

	*flagsp = 0;
	flags = m->m_pkthdr.pkt_flags;
	if ((flags & (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) ==
	    (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) {
		*flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
	}
	if ((flags & (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) ==
	    (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) {
		*flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;
	}

	/* These 2 flags are mutually exclusive */
	VERIFY((*flagsp &
	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));

	return 0;
}

errno_t
mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
{
	if (m == NULL || area == NULL || area_len == NULL ||
	    !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	*area_len = m_scratch_get(m, area);
	return 0;
}
errno_t
mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data)
{
	if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
		return EINVAL;
	}

	*unsent_data = m->m_pkthdr.bufstatus_if +
	    m->m_pkthdr.bufstatus_sndbuf;
	return 0;
}

errno_t
mbuf_get_buffer_status(const mbuf_t m, mbuf_buffer_status_t *buf_status)
{
	if (m == NULL || buf_status == NULL || !(m->m_flags & M_PKTHDR) ||
	    !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
		return EINVAL;
	}

	buf_status->buf_interface = m->m_pkthdr.bufstatus_if;
	buf_status->buf_sndbuf = m->m_pkthdr.bufstatus_sndbuf;
	return 0;
}

errno_t
mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval)
{
	if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}
	if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) {
		*retval = 1;
	} else {
		*retval = 0;
	}
	return 0;
}

errno_t
mbuf_last_pkt(const mbuf_t m, u_int32_t *retval)
{
	if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}
	if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
		*retval = 1;
	} else {
		*retval = 0;
	}
	return 0;
}
errno_t
mbuf_get_timestamp(mbuf_t m, u_int64_t *ts, boolean_t *valid)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
		if (valid != NULL) {
			*valid = FALSE;
		}
		*ts = 0;
	} else {
		if (valid != NULL) {
			*valid = TRUE;
		}
		*ts = m->m_pkthdr.pkt_timestamp;
	}
	return 0;
}

errno_t
mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if (valid == FALSE) {
		m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID;
		m->m_pkthdr.pkt_timestamp = 0;
	} else {
		m->m_pkthdr.pkt_flags |= PKTF_TS_VALID;
		m->m_pkthdr.pkt_timestamp = ts;
	}
	return 0;
}

errno_t
mbuf_get_status(mbuf_t m, kern_return_t *status)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
		*status = 0;
	} else {
		*status = m->m_pkthdr.drv_tx_status;
	}
	return 0;
}
static void
driver_mtag_init(mbuf_t m)
{
	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
		m->m_pkthdr.pkt_flags |= PKTF_DRIVER_MTAG;
		bzero(&m->m_pkthdr.driver_mtag,
		    sizeof(m->m_pkthdr.driver_mtag));
	}
}

errno_t
mbuf_set_status(mbuf_t m, kern_return_t status)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	driver_mtag_init(m);

	m->m_pkthdr.drv_tx_status = status;
	return 0;
}

errno_t
mbuf_get_flowid(mbuf_t m, u_int16_t *flowid)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
		*flowid = 0;
	} else {
		*flowid = m->m_pkthdr.drv_flowid;
	}
	return 0;
}

errno_t
mbuf_set_flowid(mbuf_t m, u_int16_t flowid)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	driver_mtag_init(m);

	m->m_pkthdr.drv_flowid = flowid;
	return 0;
}

errno_t
mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) || arg == NULL ||
	    data == NULL) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
		*arg = 0;
		*data = 0;
	} else {
		*arg = m->m_pkthdr.drv_tx_compl_arg;
		*data = m->m_pkthdr.drv_tx_compl_data;
	}
	return 0;
}

errno_t
mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg, uintptr_t data)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	driver_mtag_init(m);

	m->m_pkthdr.drv_tx_compl_arg = arg;
	m->m_pkthdr.drv_tx_compl_data = data;
	return 0;
}
static u_int32_t
get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)
{
	u_int32_t i;

	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		if (mbuf_tx_compl_table[i] == callback) {
			return i;
		}
	}
	return UINT32_MAX;
}

static u_int32_t
get_tx_compl_callback_index(mbuf_tx_compl_func callback)
{
	u_int32_t i;

	lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);

	i = get_tx_compl_callback_index_locked(callback);

	lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);

	return i;
}

mbuf_tx_compl_func
m_get_tx_compl_callback(u_int32_t idx)
{
	mbuf_tx_compl_func cb;

	if (idx >= MAX_MBUF_TX_COMPL_FUNC) {
		ASSERT(0);
		return NULL;
	}
	lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
	cb = mbuf_tx_compl_table[idx];
	lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
	return cb;
}

errno_t
mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
{
	errno_t error;
	u_int32_t i;

	if (callback == NULL) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);

	i = get_tx_compl_callback_index_locked(callback);
	if (i != UINT32_MAX) {
		error = EEXIST;
		goto unlock;
	}

	/* assume the worst */
	error = ENOSPC;
	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		if (mbuf_tx_compl_table[i] == NULL) {
			mbuf_tx_compl_table[i] = callback;
			error = 0;
			goto unlock;
		}
	}
unlock:
	lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);

	return error;
}

errno_t
mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
{
	errno_t error;
	u_int32_t i;

	if (callback == NULL) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);

	/* assume the worst */
	error = ENOENT;
	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		if (mbuf_tx_compl_table[i] == callback) {
			mbuf_tx_compl_table[i] = NULL;
			error = 0;
			goto unlock;
		}
	}
unlock:
	lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);

	return error;
}
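/*
 * Usage sketch (illustrative; my_tx_compl is made up): the callback's
 * parameter list matches the invocation in m_do_tx_compl_callback()
 * below.
 *
 *	static void
 *	my_tx_compl(uintptr_t pktid, ifnet_t ifp, u_int64_t ts,
 *	    uintptr_t arg, uintptr_t data, kern_return_t status)
 *	{
 *		... look up pktid, record ts and status ...
 *	}
 *
 *	errno_t err = mbuf_register_tx_compl_callback(my_tx_compl);
 *	...
 *	err = mbuf_unregister_tx_compl_callback(my_tx_compl);
 */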
errno_t
mbuf_get_timestamp_requested(mbuf_t m, boolean_t *requested)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		*requested = FALSE;
	} else {
		*requested = TRUE;
	}
	return 0;
}

errno_t
mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid,
    mbuf_tx_compl_func callback)
{
	u_int32_t i;

	if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL ||
	    pktid == NULL) {
		return EINVAL;
	}

	i = get_tx_compl_callback_index(callback);
	if (i == UINT32_MAX) {
		return ENOENT;
	}

#if (DEBUG || DEVELOPMENT)
	VERIFY(i < sizeof(m->m_pkthdr.pkt_compl_callbacks));
#endif /* (DEBUG || DEVELOPMENT) */

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		m->m_pkthdr.pkt_compl_callbacks = 0;
		m->m_pkthdr.pkt_flags |= PKTF_TX_COMPL_TS_REQ;
		m->m_pkthdr.pkt_compl_context =
		    atomic_add_32_ov(&mbuf_tx_compl_index, 1);

#if (DEBUG || DEVELOPMENT)
		if (mbuf_tx_compl_debug != 0) {
			OSIncrementAtomic64(&mbuf_tx_compl_outstanding);
		}
#endif /* (DEBUG || DEVELOPMENT) */
	}
	m->m_pkthdr.pkt_compl_callbacks |= (1 << i);
	*pktid = m->m_pkthdr.pkt_compl_context;

	return 0;
}
void
m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
{
	int i;

	if (m == NULL) {
		return;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		return;
	}

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
	    (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
	    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
		struct timespec now;

		nanouptime(&now);
		net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
	}
#endif /* (DEBUG || DEVELOPMENT) */

	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		mbuf_tx_compl_func callback;

		if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) {
			continue;
		}

		lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
		callback = mbuf_tx_compl_table[i];
		lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);

		if (callback != NULL) {
			callback(m->m_pkthdr.pkt_compl_context,
			    ifp,
			    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) ?
			    m->m_pkthdr.pkt_timestamp : 0,
			    m->m_pkthdr.drv_tx_compl_arg,
			    m->m_pkthdr.drv_tx_compl_data,
			    m->m_pkthdr.drv_tx_status);
		}
	}
	m->m_pkthdr.pkt_compl_callbacks = 0;

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0) {
		OSDecrementAtomic64(&mbuf_tx_compl_outstanding);
		if (ifp == NULL) {
			atomic_add_64(&mbuf_tx_compl_aborted, 1);
		}
	}
#endif /* (DEBUG || DEVELOPMENT) */
}
errno_t
mbuf_get_keepalive_flag(mbuf_t m, boolean_t *is_keepalive)
{
	if (m == NULL || is_keepalive == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	*is_keepalive = (m->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);

	return 0;
}

errno_t
mbuf_set_keepalive_flag(mbuf_t m, boolean_t is_keepalive)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if (is_keepalive) {
		m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
	} else {
		m->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
	}

	return 0;
}