2 * Copyright (c) 2004-2017 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
31 #include <sys/param.h>
33 #include <sys/mcache.h>
34 #include <sys/socket.h>
35 #include <kern/debug.h>
36 #include <libkern/OSAtomic.h>
37 #include <kern/kalloc.h>
40 #include <netinet/in.h>
41 #include <netinet/ip_var.h>
43 #include "net/net_str_id.h"
45 /* mbuf flags visible to KPI clients; do not add private flags here */
46 static const mbuf_flags_t mbuf_flags_mask
= (MBUF_EXT
| MBUF_PKTHDR
| MBUF_EOR
|
47 MBUF_LOOP
| MBUF_BCAST
| MBUF_MCAST
| MBUF_FRAG
| MBUF_FIRSTFRAG
|
48 MBUF_LASTFRAG
| MBUF_PROMISC
| MBUF_HASFCS
);
50 /* Unalterable mbuf flags */
51 static const mbuf_flags_t mbuf_cflags_mask
= (MBUF_EXT
);
53 #define MAX_MBUF_TX_COMPL_FUNC 32
55 mbuf_tx_compl_table
[MAX_MBUF_TX_COMPL_FUNC
];
56 extern lck_rw_t
*mbuf_tx_compl_tbl_lock
;
57 u_int32_t mbuf_tx_compl_index
= 0;
59 #if (DEVELOPMENT || DEBUG)
60 int mbuf_tx_compl_debug
= 0;
61 SInt64 mbuf_tx_compl_outstanding
__attribute__((aligned(8))) = 0;
62 u_int64_t mbuf_tx_compl_aborted
__attribute__((aligned(8))) = 0;
64 SYSCTL_DECL(_kern_ipc
);
65 SYSCTL_NODE(_kern_ipc
, OID_AUTO
, mbtxcf
,
66 CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "");
67 SYSCTL_INT(_kern_ipc_mbtxcf
, OID_AUTO
, debug
,
68 CTLFLAG_RW
| CTLFLAG_LOCKED
, &mbuf_tx_compl_debug
, 0, "");
69 SYSCTL_INT(_kern_ipc_mbtxcf
, OID_AUTO
, index
,
70 CTLFLAG_RD
| CTLFLAG_LOCKED
, &mbuf_tx_compl_index
, 0, "");
71 SYSCTL_QUAD(_kern_ipc_mbtxcf
, OID_AUTO
, oustanding
,
72 CTLFLAG_RD
| CTLFLAG_LOCKED
, &mbuf_tx_compl_outstanding
, "");
73 SYSCTL_QUAD(_kern_ipc_mbtxcf
, OID_AUTO
, aborted
,
74 CTLFLAG_RD
| CTLFLAG_LOCKED
, &mbuf_tx_compl_aborted
, "");
75 #endif /* (DEBUG || DEVELOPMENT) */
78 mbuf_data(mbuf_t mbuf
)
80 return (mbuf
->m_data
);
84 mbuf_datastart(mbuf_t mbuf
)
86 if (mbuf
->m_flags
& M_EXT
)
87 return (mbuf
->m_ext
.ext_buf
);
88 if (mbuf
->m_flags
& M_PKTHDR
)
89 return (mbuf
->m_pktdat
);
94 mbuf_setdata(mbuf_t mbuf
, void *data
, size_t len
)
96 size_t start
= (size_t)((char *)mbuf_datastart(mbuf
));
97 size_t maxlen
= mbuf_maxlen(mbuf
);
99 if ((size_t)data
< start
|| ((size_t)data
) + len
> start
+ maxlen
)
108 mbuf_align_32(mbuf_t mbuf
, size_t len
)
110 if ((mbuf
->m_flags
& M_EXT
) != 0 && m_mclhasreference(mbuf
))
112 mbuf
->m_data
= mbuf_datastart(mbuf
);
114 ((mbuf_trailingspace(mbuf
) - len
) &~ (sizeof(u_int32_t
) - 1));
120 * This function is used to provide mcl_to_paddr via symbol indirection,
121 * please avoid any change in behavior or remove the indirection in
122 * config/Unsupported*
125 mbuf_data_to_physical(void *ptr
)
127 return ((addr64_t
)mcl_to_paddr(ptr
));
131 mbuf_get(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
133 /* Must set *mbuf to NULL in failure case */
134 *mbuf
= m_get(how
, type
);
136 return (*mbuf
== NULL
? ENOMEM
: 0);
140 mbuf_gethdr(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
142 /* Must set *mbuf to NULL in failure case */
143 *mbuf
= m_gethdr(how
, type
);
145 return (*mbuf
== NULL
? ENOMEM
: 0);
149 mbuf_attachcluster(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
,
150 caddr_t extbuf
, void (*extfree
)(caddr_t
, u_int
, caddr_t
),
151 size_t extsize
, caddr_t extarg
)
153 if (mbuf
== NULL
|| extbuf
== NULL
|| extfree
== NULL
|| extsize
== 0)
156 if ((*mbuf
= m_clattach(*mbuf
, type
, extbuf
,
157 extfree
, extsize
, extarg
, how
, 0)) == NULL
)
164 mbuf_ring_cluster_alloc(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
,
165 void (*extfree
)(caddr_t
, u_int
, caddr_t
), size_t *size
)
167 caddr_t extbuf
= NULL
;
170 if (mbuf
== NULL
|| extfree
== NULL
|| size
== NULL
|| *size
== 0)
173 if ((err
= mbuf_alloccluster(how
, size
, &extbuf
)) != 0)
176 if ((*mbuf
= m_clattach(*mbuf
, type
, extbuf
,
177 extfree
, *size
, NULL
, how
, 1)) == NULL
) {
178 mbuf_freecluster(extbuf
, *size
);
186 mbuf_ring_cluster_is_active(mbuf_t mbuf
)
188 return (m_ext_paired_is_active(mbuf
));
192 mbuf_ring_cluster_activate(mbuf_t mbuf
)
194 if (mbuf_ring_cluster_is_active(mbuf
))
197 m_ext_paired_activate(mbuf
);
202 mbuf_cluster_set_prop(mbuf_t mbuf
, u_int32_t oldprop
, u_int32_t newprop
)
204 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_EXT
))
207 return (m_ext_set_prop(mbuf
, oldprop
, newprop
) ? 0 : EBUSY
);
211 mbuf_cluster_get_prop(mbuf_t mbuf
, u_int32_t
*prop
)
213 if (mbuf
== NULL
|| prop
== NULL
|| !(mbuf
->m_flags
& M_EXT
))
216 *prop
= m_ext_get_prop(mbuf
);
221 mbuf_alloccluster(mbuf_how_t how
, size_t *size
, caddr_t
*addr
)
223 if (size
== NULL
|| *size
== 0 || addr
== NULL
)
228 /* Jumbo cluster pool not available? */
229 if (*size
> MBIGCLBYTES
&& njcl
== 0)
232 if (*size
<= MCLBYTES
&& (*addr
= m_mclalloc(how
)) != NULL
)
234 else if (*size
> MCLBYTES
&& *size
<= MBIGCLBYTES
&&
235 (*addr
= m_bigalloc(how
)) != NULL
)
237 else if (*size
> MBIGCLBYTES
&& *size
<= M16KCLBYTES
&&
238 (*addr
= m_16kalloc(how
)) != NULL
)
250 mbuf_freecluster(caddr_t addr
, size_t size
)
252 if (size
!= MCLBYTES
&& size
!= MBIGCLBYTES
&& size
!= M16KCLBYTES
)
253 panic("%s: invalid size (%ld) for cluster %p", __func__
,
256 if (size
== MCLBYTES
)
258 else if (size
== MBIGCLBYTES
)
259 m_bigfree(addr
, MBIGCLBYTES
, NULL
);
261 m_16kfree(addr
, M16KCLBYTES
, NULL
);
263 panic("%s: freeing jumbo cluster to an empty pool", __func__
);
267 mbuf_getcluster(mbuf_how_t how
, mbuf_type_t type
, size_t size
, mbuf_t
*mbuf
)
269 /* Must set *mbuf to NULL in failure case */
276 *mbuf
= m_get(how
, type
);
282 * At the time this code was written, m_{mclget,mbigget,m16kget}
283 * would always return the same value that was passed in to it.
285 if (size
== MCLBYTES
) {
286 *mbuf
= m_mclget(*mbuf
, how
);
287 } else if (size
== MBIGCLBYTES
) {
288 *mbuf
= m_mbigget(*mbuf
, how
);
289 } else if (size
== M16KCLBYTES
) {
291 *mbuf
= m_m16kget(*mbuf
, how
);
293 /* Jumbo cluster pool not available? */
301 if (*mbuf
== NULL
|| ((*mbuf
)->m_flags
& M_EXT
) == 0)
304 if (created
&& error
!= 0) {
312 mbuf_mclget(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
314 /* Must set *mbuf to NULL in failure case */
320 error
= mbuf_get(how
, type
, mbuf
);
327 * At the time this code was written, m_mclget would always
328 * return the same value that was passed in to it.
330 *mbuf
= m_mclget(*mbuf
, how
);
332 if (created
&& ((*mbuf
)->m_flags
& M_EXT
) == 0) {
336 if (*mbuf
== NULL
|| ((*mbuf
)->m_flags
& M_EXT
) == 0)
343 mbuf_getpacket(mbuf_how_t how
, mbuf_t
*mbuf
)
345 /* Must set *mbuf to NULL in failure case */
348 *mbuf
= m_getpacket_how(how
);
351 if (how
== MBUF_WAITOK
)
361 * This function is used to provide m_free via symbol indirection, please avoid
362 * any change in behavior or remove the indirection in config/Unsupported*
365 mbuf_free(mbuf_t mbuf
)
367 return (m_free(mbuf
));
371 * This function is used to provide m_freem via symbol indirection, please avoid
372 * any change in behavior or remove the indirection in config/Unsupported*
375 mbuf_freem(mbuf_t mbuf
)
381 mbuf_freem_list(mbuf_t mbuf
)
383 return (m_freem_list(mbuf
));
387 mbuf_leadingspace(const mbuf_t mbuf
)
389 return (m_leadingspace(mbuf
));
393 * This function is used to provide m_trailingspace via symbol indirection,
394 * please avoid any change in behavior or remove the indirection in
395 * config/Unsupported*
398 mbuf_trailingspace(const mbuf_t mbuf
)
400 return (m_trailingspace(mbuf
));
405 mbuf_copym(const mbuf_t src
, size_t offset
, size_t len
,
406 mbuf_how_t how
, mbuf_t
*new_mbuf
)
408 /* Must set *mbuf to NULL in failure case */
409 *new_mbuf
= m_copym(src
, offset
, len
, how
);
411 return (*new_mbuf
== NULL
? ENOMEM
: 0);
415 mbuf_dup(const mbuf_t src
, mbuf_how_t how
, mbuf_t
*new_mbuf
)
417 /* Must set *new_mbuf to NULL in failure case */
418 *new_mbuf
= m_dup(src
, how
);
420 return (*new_mbuf
== NULL
? ENOMEM
: 0);
424 mbuf_prepend(mbuf_t
*orig
, size_t len
, mbuf_how_t how
)
426 /* Must set *orig to NULL in failure case */
427 *orig
= m_prepend_2(*orig
, len
, how
, 0);
429 return (*orig
== NULL
? ENOMEM
: 0);
433 mbuf_split(mbuf_t src
, size_t offset
,
434 mbuf_how_t how
, mbuf_t
*new_mbuf
)
436 /* Must set *new_mbuf to NULL in failure case */
437 *new_mbuf
= m_split(src
, offset
, how
);
439 return (*new_mbuf
== NULL
? ENOMEM
: 0);
443 mbuf_pullup(mbuf_t
*mbuf
, size_t len
)
445 /* Must set *mbuf to NULL in failure case */
446 *mbuf
= m_pullup(*mbuf
, len
);
448 return (*mbuf
== NULL
? ENOMEM
: 0);
452 mbuf_pulldown(mbuf_t src
, size_t *offset
, size_t len
, mbuf_t
*location
)
454 /* Must set *location to NULL in failure case */
456 *location
= m_pulldown(src
, *offset
, len
, &new_offset
);
457 *offset
= new_offset
;
459 return (*location
== NULL
? ENOMEM
: 0);
463 * This function is used to provide m_adj via symbol indirection, please avoid
464 * any change in behavior or remove the indirection in config/Unsupported*
467 mbuf_adj(mbuf_t mbuf
, int len
)
473 mbuf_adjustlen(mbuf_t m
, int amount
)
475 /* Verify m_len will be valid after adding amount */
477 int used
= (size_t)mbuf_data(m
) - (size_t)mbuf_datastart(m
) +
480 if ((size_t)(amount
+ used
) > mbuf_maxlen(m
))
482 } else if (-amount
> m
->m_len
) {
491 mbuf_concatenate(mbuf_t dst
, mbuf_t src
)
498 /* return dst as is in the current implementation */
502 mbuf_copydata(const mbuf_t m0
, size_t off
, size_t len
, void *out_data
)
504 /* Copied m_copydata, added error handling (don't just panic) */
511 if (off
< (size_t)m
->m_len
)
519 count
= m
->m_len
- off
> len
? len
: m
->m_len
- off
;
520 bcopy(mtod(m
, caddr_t
) + off
, out_data
, count
);
522 out_data
= ((char *)out_data
) + count
;
531 mbuf_mclhasreference(mbuf_t mbuf
)
533 if ((mbuf
->m_flags
& M_EXT
))
534 return (m_mclhasreference(mbuf
));
542 mbuf_next(const mbuf_t mbuf
)
544 return (mbuf
->m_next
);
548 mbuf_setnext(mbuf_t mbuf
, mbuf_t next
)
550 if (next
&& ((next
)->m_nextpkt
!= NULL
||
551 (next
)->m_type
== MT_FREE
))
559 mbuf_nextpkt(const mbuf_t mbuf
)
561 return (mbuf
->m_nextpkt
);
565 mbuf_setnextpkt(mbuf_t mbuf
, mbuf_t nextpkt
)
567 mbuf
->m_nextpkt
= nextpkt
;
571 mbuf_len(const mbuf_t mbuf
)
573 return (mbuf
->m_len
);
577 mbuf_setlen(mbuf_t mbuf
, size_t len
)
583 mbuf_maxlen(const mbuf_t mbuf
)
585 if (mbuf
->m_flags
& M_EXT
)
586 return (mbuf
->m_ext
.ext_size
);
587 return (&mbuf
->m_dat
[MLEN
] - ((char *)mbuf_datastart(mbuf
)));
591 mbuf_type(const mbuf_t mbuf
)
593 return (mbuf
->m_type
);
597 mbuf_settype(mbuf_t mbuf
, mbuf_type_t new_type
)
599 if (new_type
== MBUF_TYPE_FREE
)
602 m_mchtype(mbuf
, new_type
);
608 mbuf_flags(const mbuf_t mbuf
)
610 return (mbuf
->m_flags
& mbuf_flags_mask
);
614 mbuf_setflags(mbuf_t mbuf
, mbuf_flags_t flags
)
617 mbuf_flags_t oflags
= mbuf
->m_flags
;
620 * 1. Return error if public but un-alterable flags are changed
622 * 2. Return error if bits other than public flags are set in passed
624 * Please note that private flag bits must be passed as reset by
625 * kexts, as they must use mbuf_flags KPI to get current set of
626 * mbuf flags and mbuf_flags KPI does not expose private flags.
628 if ((flags
^ oflags
) & mbuf_cflags_mask
) {
630 } else if (flags
& ~mbuf_flags_mask
) {
633 mbuf
->m_flags
= flags
| (mbuf
->m_flags
& ~mbuf_flags_mask
);
635 * If M_PKTHDR bit has changed, we have work to do;
636 * m_reinit() will take care of setting/clearing the
637 * bit, as well as the rest of bookkeeping.
639 if ((oflags
^ mbuf
->m_flags
) & M_PKTHDR
) {
640 mbuf
->m_flags
^= M_PKTHDR
; /* restore */
642 (mbuf
->m_flags
& M_PKTHDR
) ? 0 : 1);
650 mbuf_setflags_mask(mbuf_t mbuf
, mbuf_flags_t flags
, mbuf_flags_t mask
)
654 if (mask
& (~mbuf_flags_mask
| mbuf_cflags_mask
)) {
657 mbuf_flags_t oflags
= mbuf
->m_flags
;
658 mbuf
->m_flags
= (flags
& mask
) | (mbuf
->m_flags
& ~mask
);
660 * If M_PKTHDR bit has changed, we have work to do;
661 * m_reinit() will take care of setting/clearing the
662 * bit, as well as the rest of bookkeeping.
664 if ((oflags
^ mbuf
->m_flags
) & M_PKTHDR
) {
665 mbuf
->m_flags
^= M_PKTHDR
; /* restore */
667 (mbuf
->m_flags
& M_PKTHDR
) ? 0 : 1);
675 mbuf_copy_pkthdr(mbuf_t dest
, const mbuf_t src
)
677 if (((src
)->m_flags
& M_PKTHDR
) == 0)
680 m_copy_pkthdr(dest
, src
);
686 mbuf_pkthdr_len(const mbuf_t mbuf
)
688 return (mbuf
->m_pkthdr
.len
);
691 __private_extern__
size_t
692 mbuf_pkthdr_maxlen(mbuf_t m
)
698 maxlen
+= mbuf_maxlen(n
);
705 mbuf_pkthdr_setlen(mbuf_t mbuf
, size_t len
)
707 mbuf
->m_pkthdr
.len
= len
;
711 mbuf_pkthdr_adjustlen(mbuf_t mbuf
, int amount
)
713 mbuf
->m_pkthdr
.len
+= amount
;
717 mbuf_pkthdr_rcvif(const mbuf_t mbuf
)
720 * If we reference count ifnets, we should take a reference here
723 return (mbuf
->m_pkthdr
.rcvif
);
727 mbuf_pkthdr_setrcvif(mbuf_t mbuf
, ifnet_t ifnet
)
729 /* May want to walk ifnet list to determine if interface is valid */
730 mbuf
->m_pkthdr
.rcvif
= (struct ifnet
*)ifnet
;
735 mbuf_pkthdr_header(const mbuf_t mbuf
)
737 return (mbuf
->m_pkthdr
.pkt_hdr
);
741 mbuf_pkthdr_setheader(mbuf_t mbuf
, void *header
)
743 mbuf
->m_pkthdr
.pkt_hdr
= (void*)header
;
747 mbuf_inbound_modified(mbuf_t mbuf
)
749 /* Invalidate hardware generated checksum flags */
750 mbuf
->m_pkthdr
.csum_flags
= 0;
754 mbuf_outbound_finalize(struct mbuf
*m
, u_int32_t pf
, size_t o
)
756 /* Generate the packet in software, client needs it */
759 (void) in_finalize_cksum(m
, o
, m
->m_pkthdr
.csum_flags
);
765 * Checksum offload should not have been enabled when
766 * extension headers exist; indicate that the callee
767 * should skip such case by setting optlen to -1.
769 (void) in6_finalize_cksum(m
, o
, -1, -1, m
->m_pkthdr
.csum_flags
);
783 mbuf
->m_pkthdr
.csum_flags
|= CSUM_VLAN_TAG_VALID
;
784 mbuf
->m_pkthdr
.vlan_tag
= vlan
;
794 if ((mbuf
->m_pkthdr
.csum_flags
& CSUM_VLAN_TAG_VALID
) == 0)
795 return (ENXIO
); // No vlan tag set
797 *vlan
= mbuf
->m_pkthdr
.vlan_tag
;
806 mbuf
->m_pkthdr
.csum_flags
&= ~CSUM_VLAN_TAG_VALID
;
807 mbuf
->m_pkthdr
.vlan_tag
= 0;
812 static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags
=
813 MBUF_CSUM_REQ_IP
| MBUF_CSUM_REQ_TCP
| MBUF_CSUM_REQ_UDP
|
814 MBUF_CSUM_PARTIAL
| MBUF_CSUM_REQ_TCPIPV6
| MBUF_CSUM_REQ_UDPIPV6
;
817 mbuf_set_csum_requested(
819 mbuf_csum_request_flags_t request
,
822 request
&= mbuf_valid_csum_request_flags
;
823 mbuf
->m_pkthdr
.csum_flags
=
824 (mbuf
->m_pkthdr
.csum_flags
& 0xffff0000) | request
;
825 mbuf
->m_pkthdr
.csum_data
= value
;
830 static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags
=
831 MBUF_TSO_IPV4
| MBUF_TSO_IPV6
;
834 mbuf_get_tso_requested(
836 mbuf_tso_request_flags_t
*request
,
839 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 ||
840 request
== NULL
|| value
== NULL
)
843 *request
= mbuf
->m_pkthdr
.csum_flags
;
844 *request
&= mbuf_valid_tso_request_flags
;
845 if (*request
&& value
!= NULL
)
846 *value
= mbuf
->m_pkthdr
.tso_segsz
;
852 mbuf_get_csum_requested(
854 mbuf_csum_request_flags_t
*request
,
857 *request
= mbuf
->m_pkthdr
.csum_flags
;
858 *request
&= mbuf_valid_csum_request_flags
;
860 *value
= mbuf
->m_pkthdr
.csum_data
;
867 mbuf_clear_csum_requested(
870 mbuf
->m_pkthdr
.csum_flags
&= 0xffff0000;
871 mbuf
->m_pkthdr
.csum_data
= 0;
876 static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags
=
877 MBUF_CSUM_DID_IP
| MBUF_CSUM_IP_GOOD
| MBUF_CSUM_DID_DATA
|
878 MBUF_CSUM_PSEUDO_HDR
| MBUF_CSUM_PARTIAL
;
881 mbuf_set_csum_performed(
883 mbuf_csum_performed_flags_t performed
,
886 performed
&= mbuf_valid_csum_performed_flags
;
887 mbuf
->m_pkthdr
.csum_flags
=
888 (mbuf
->m_pkthdr
.csum_flags
& 0xffff0000) | performed
;
889 mbuf
->m_pkthdr
.csum_data
= value
;
895 mbuf_get_csum_performed(
897 mbuf_csum_performed_flags_t
*performed
,
901 mbuf
->m_pkthdr
.csum_flags
& mbuf_valid_csum_performed_flags
;
902 *value
= mbuf
->m_pkthdr
.csum_data
;
908 mbuf_clear_csum_performed(
911 mbuf
->m_pkthdr
.csum_flags
&= 0xffff0000;
912 mbuf
->m_pkthdr
.csum_data
= 0;
918 mbuf_inet_cksum(mbuf_t mbuf
, int protocol
, u_int32_t offset
, u_int32_t length
,
921 if (mbuf
== NULL
|| length
== 0 || csum
== NULL
||
922 (u_int32_t
)mbuf
->m_pkthdr
.len
< (offset
+ length
))
925 *csum
= inet_cksum(mbuf
, protocol
, offset
, length
);
931 mbuf_inet6_cksum(mbuf_t mbuf
, int protocol
, u_int32_t offset
, u_int32_t length
,
934 if (mbuf
== NULL
|| length
== 0 || csum
== NULL
||
935 (u_int32_t
)mbuf
->m_pkthdr
.len
< (offset
+ length
))
938 *csum
= inet6_cksum(mbuf
, protocol
, offset
, length
);
943 mbuf_inet6_cksum(__unused mbuf_t mbuf
, __unused
int protocol
,
944 __unused u_int32_t offset
, __unused u_int32_t length
,
945 __unused u_int16_t
*csum
)
947 panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
952 inet6_cksum(__unused
struct mbuf
*m
, __unused
unsigned int nxt
,
953 __unused
unsigned int off
, __unused
unsigned int len
)
955 panic("inet6_cksum() doesn't exist on this platform\n");
959 void nd6_lookup_ipv6(void);
961 nd6_lookup_ipv6(void)
963 panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
967 in6addr_local(__unused
struct in6_addr
*a
)
969 panic("in6addr_local() doesn't exist on this platform\n");
973 void nd6_storelladdr(void);
975 nd6_storelladdr(void)
977 panic("nd6_storelladdr() doesn't exist on this platform\n");
985 #define MTAG_FIRST_ID FIRST_KPI_STR_ID
990 mbuf_tag_id_t
*out_id
)
992 return (net_str_id_find_internal(string
, out_id
, NSI_MBUF_TAG
, 1));
999 mbuf_tag_type_t type
,
1005 u_int32_t mtag_id_first
, mtag_id_last
;
1010 /* Sanity check parameters */
1011 (void) net_str_id_first_last(&mtag_id_first
, &mtag_id_last
,
1013 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 ||
1014 id
< mtag_id_first
|| id
> mtag_id_last
|| length
< 1 ||
1015 (length
& 0xffff0000) != 0 || data_p
== NULL
) {
1019 /* Make sure this mtag hasn't already been allocated */
1020 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
1025 /* Allocate an mtag */
1026 tag
= m_tag_create(id
, type
, length
, how
, mbuf
);
1028 return (how
== M_WAITOK
? ENOMEM
: EWOULDBLOCK
);
1031 /* Attach the mtag and set *data_p */
1032 m_tag_prepend(mbuf
, tag
);
1042 mbuf_tag_type_t type
,
1047 u_int32_t mtag_id_first
, mtag_id_last
;
1054 /* Sanity check parameters */
1055 (void) net_str_id_first_last(&mtag_id_first
, &mtag_id_last
,
1057 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 ||
1058 id
< mtag_id_first
|| id
> mtag_id_last
|| length
== NULL
||
1063 /* Locate an mtag */
1064 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
1069 /* Copy out the pointer to the data and the length value */
1070 *length
= tag
->m_tag_len
;
1080 mbuf_tag_type_t type
)
1083 u_int32_t mtag_id_first
, mtag_id_last
;
1085 /* Sanity check parameters */
1086 (void) net_str_id_first_last(&mtag_id_first
, &mtag_id_last
,
1088 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 ||
1089 id
< mtag_id_first
|| id
> mtag_id_last
)
1092 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
1097 m_tag_delete(mbuf
, tag
);
1101 * Maximum length of driver auxiliary data; keep this small to
1102 * fit in a single mbuf to avoid wasting memory, rounded down to
1103 * the nearest 64-bit boundary. This takes into account mbuf
1104 * tag-related (m_taghdr + m_tag) as well m_drvaux_tag structs.
1106 #define MBUF_DRVAUX_MAXLEN \
1107 P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) - \
1108 M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
1111 mbuf_add_drvaux(mbuf_t mbuf
, mbuf_how_t how
, u_int32_t family
,
1112 u_int32_t subfamily
, size_t length
, void **data_p
)
1114 struct m_drvaux_tag
*p
;
1117 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_PKTHDR
) ||
1118 length
== 0 || length
> MBUF_DRVAUX_MAXLEN
)
1124 /* Check if one is already associated */
1125 if ((tag
= m_tag_locate(mbuf
, KERNEL_MODULE_TAG_ID
,
1126 KERNEL_TAG_TYPE_DRVAUX
, NULL
)) != NULL
)
1129 /* Tag is (m_drvaux_tag + module specific data) */
1130 if ((tag
= m_tag_create(KERNEL_MODULE_TAG_ID
, KERNEL_TAG_TYPE_DRVAUX
,
1131 sizeof (*p
) + length
, how
, mbuf
)) == NULL
)
1132 return ((how
== MBUF_WAITOK
) ? ENOMEM
: EWOULDBLOCK
);
1134 p
= (struct m_drvaux_tag
*)(tag
+ 1);
1135 p
->da_family
= family
;
1136 p
->da_subfamily
= subfamily
;
1137 p
->da_length
= length
;
1139 /* Associate the tag */
1140 m_tag_prepend(mbuf
, tag
);
1149 mbuf_find_drvaux(mbuf_t mbuf
, u_int32_t
*family_p
, u_int32_t
*subfamily_p
,
1150 u_int32_t
*length_p
, void **data_p
)
1152 struct m_drvaux_tag
*p
;
1155 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_PKTHDR
) || data_p
== NULL
)
1160 if ((tag
= m_tag_locate(mbuf
, KERNEL_MODULE_TAG_ID
,
1161 KERNEL_TAG_TYPE_DRVAUX
, NULL
)) == NULL
)
1164 /* Must be at least size of m_drvaux_tag */
1165 VERIFY(tag
->m_tag_len
>= sizeof (*p
));
1167 p
= (struct m_drvaux_tag
*)(tag
+ 1);
1168 VERIFY(p
->da_length
> 0 && p
->da_length
<= MBUF_DRVAUX_MAXLEN
);
1170 if (family_p
!= NULL
)
1171 *family_p
= p
->da_family
;
1172 if (subfamily_p
!= NULL
)
1173 *subfamily_p
= p
->da_subfamily
;
1174 if (length_p
!= NULL
)
1175 *length_p
= p
->da_length
;
1183 mbuf_del_drvaux(mbuf_t mbuf
)
1187 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_PKTHDR
))
1190 if ((tag
= m_tag_locate(mbuf
, KERNEL_MODULE_TAG_ID
,
1191 KERNEL_TAG_TYPE_DRVAUX
, NULL
)) != NULL
)
1192 m_tag_delete(mbuf
, tag
);
1197 mbuf_stats(struct mbuf_stat
*stats
)
1199 stats
->mbufs
= mbstat
.m_mbufs
;
1200 stats
->clusters
= mbstat
.m_clusters
;
1201 stats
->clfree
= mbstat
.m_clfree
;
1202 stats
->drops
= mbstat
.m_drops
;
1203 stats
->wait
= mbstat
.m_wait
;
1204 stats
->drain
= mbstat
.m_drain
;
1205 __builtin_memcpy(stats
->mtypes
, mbstat
.m_mtypes
, sizeof(stats
->mtypes
));
1206 stats
->mcfail
= mbstat
.m_mcfail
;
1207 stats
->mpfail
= mbstat
.m_mpfail
;
1208 stats
->msize
= mbstat
.m_msize
;
1209 stats
->mclbytes
= mbstat
.m_mclbytes
;
1210 stats
->minclsize
= mbstat
.m_minclsize
;
1211 stats
->mlen
= mbstat
.m_mlen
;
1212 stats
->mhlen
= mbstat
.m_mhlen
;
1213 stats
->bigclusters
= mbstat
.m_bigclusters
;
1214 stats
->bigclfree
= mbstat
.m_bigclfree
;
1215 stats
->bigmclbytes
= mbstat
.m_bigmclbytes
;
1219 mbuf_allocpacket(mbuf_how_t how
, size_t packetlen
, unsigned int *maxchunks
,
1224 unsigned int numpkts
= 1;
1225 unsigned int numchunks
= maxchunks
? *maxchunks
: 0;
1227 if (packetlen
== 0) {
1231 m
= m_allocpacket_internal(&numpkts
, packetlen
,
1232 maxchunks
? &numchunks
: NULL
, how
, 1, 0);
1234 if (maxchunks
&& *maxchunks
&& numchunks
> *maxchunks
)
1240 *maxchunks
= numchunks
;
1249 mbuf_allocpacket_list(unsigned int numpkts
, mbuf_how_t how
, size_t packetlen
,
1250 unsigned int *maxchunks
, mbuf_t
*mbuf
)
1254 unsigned int numchunks
= maxchunks
? *maxchunks
: 0;
1260 if (packetlen
== 0) {
1264 m
= m_allocpacket_internal(&numpkts
, packetlen
,
1265 maxchunks
? &numchunks
: NULL
, how
, 1, 0);
1267 if (maxchunks
&& *maxchunks
&& numchunks
> *maxchunks
)
1273 *maxchunks
= numchunks
;
1281 __private_extern__
size_t
1282 mbuf_pkt_list_len(mbuf_t m
)
1288 len
+= mbuf_pkthdr_len(n
);
1289 n
= mbuf_nextpkt(n
);
1294 __private_extern__
size_t
1295 mbuf_pkt_list_maxlen(mbuf_t m
)
1301 maxlen
+= mbuf_pkthdr_maxlen(n
);
1302 n
= mbuf_nextpkt(n
);
1308 * mbuf_copyback differs from m_copyback in a few ways:
1309 * 1) mbuf_copyback will allocate clusters for new mbufs we append
1310 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
1311 * 3) mbuf_copyback reports whether or not the operation succeeded
1312 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
1327 const char *cp
= data
;
1329 if (m
== NULL
|| len
== 0 || data
== NULL
)
1332 while (off
> (mlen
= m
->m_len
)) {
1335 if (m
->m_next
== 0) {
1336 n
= m_getclr(how
, m
->m_type
);
1341 n
->m_len
= MIN(MLEN
, len
+ off
);
1348 mlen
= MIN(m
->m_len
- off
, len
);
1349 if (mlen
< len
&& m
->m_next
== NULL
&&
1350 mbuf_trailingspace(m
) > 0) {
1351 size_t grow
= MIN(mbuf_trailingspace(m
), len
- mlen
);
1355 bcopy(cp
, off
+ (char *)mbuf_data(m
), (unsigned)mlen
);
1363 if (m
->m_next
== 0) {
1364 n
= m_get(how
, m
->m_type
);
1369 if (len
> MINCLSIZE
) {
1371 * cluster allocation failure is okay,
1374 mbuf_mclget(how
, m
->m_type
, &n
);
1376 n
->m_len
= MIN(mbuf_maxlen(n
), len
);
1383 if ((m_start
->m_flags
& M_PKTHDR
) && (m_start
->m_pkthdr
.len
< totlen
))
1384 m_start
->m_pkthdr
.len
= totlen
;
1396 mbuf_get_mhlen(void)
1402 mbuf_get_minclsize(void)
1404 return (MHLEN
+ MLEN
);
1408 mbuf_get_traffic_class_max_count(void)
1410 return (MBUF_TC_MAX
);
1414 mbuf_get_traffic_class_index(mbuf_traffic_class_t tc
, u_int32_t
*index
)
1416 if (index
== NULL
|| (u_int32_t
)tc
>= MBUF_TC_MAX
)
1419 *index
= MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc
)));
1423 mbuf_traffic_class_t
1424 mbuf_get_traffic_class(mbuf_t m
)
1426 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1427 return (MBUF_TC_BE
);
1429 return (m_get_traffic_class(m
));
1433 mbuf_set_traffic_class(mbuf_t m
, mbuf_traffic_class_t tc
)
1435 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) ||
1436 ((u_int32_t
)tc
>= MBUF_TC_MAX
))
1439 return (m_set_traffic_class(m
, tc
));
1443 mbuf_is_traffic_class_privileged(mbuf_t m
)
1445 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) ||
1446 !MBUF_VALID_SC(m
->m_pkthdr
.pkt_svc
))
1449 return ((m
->m_pkthdr
.pkt_flags
& PKTF_PRIO_PRIVILEGED
) ? 1 : 0);
1453 mbuf_get_service_class_max_count(void)
1455 return (MBUF_SC_MAX_CLASSES
);
1459 mbuf_get_service_class_index(mbuf_svc_class_t sc
, u_int32_t
*index
)
1461 if (index
== NULL
|| !MBUF_VALID_SC(sc
))
1464 *index
= MBUF_SCIDX(sc
);
1469 mbuf_get_service_class(mbuf_t m
)
1471 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1472 return (MBUF_SC_BE
);
1474 return (m_get_service_class(m
));
1478 mbuf_set_service_class(mbuf_t m
, mbuf_svc_class_t sc
)
1480 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1483 return (m_set_service_class(m
, sc
));
1487 mbuf_pkthdr_aux_flags(mbuf_t m
, mbuf_pkthdr_aux_flags_t
*flagsp
)
1491 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || flagsp
== NULL
)
1495 flags
= m
->m_pkthdr
.pkt_flags
;
1496 if ((flags
& (PKTF_INET_RESOLVE
|PKTF_RESOLVE_RTR
)) ==
1497 (PKTF_INET_RESOLVE
|PKTF_RESOLVE_RTR
))
1498 *flagsp
|= MBUF_PKTAUXF_INET_RESOLVE_RTR
;
1499 if ((flags
& (PKTF_INET6_RESOLVE
|PKTF_RESOLVE_RTR
)) ==
1500 (PKTF_INET6_RESOLVE
|PKTF_RESOLVE_RTR
))
1501 *flagsp
|= MBUF_PKTAUXF_INET6_RESOLVE_RTR
;
1503 /* These 2 flags are mutually exclusive */
1505 (MBUF_PKTAUXF_INET_RESOLVE_RTR
| MBUF_PKTAUXF_INET6_RESOLVE_RTR
)) !=
1506 (MBUF_PKTAUXF_INET_RESOLVE_RTR
| MBUF_PKTAUXF_INET6_RESOLVE_RTR
));
1512 mbuf_get_driver_scratch(mbuf_t m
, u_int8_t
**area
, size_t *area_len
)
1514 if (m
== NULL
|| area
== NULL
|| area_len
== NULL
||
1515 !(m
->m_flags
& M_PKTHDR
))
1518 *area_len
= m_scratch_get(m
, area
);
1523 mbuf_get_unsent_data_bytes(const mbuf_t m
, u_int32_t
*unsent_data
)
1525 if (m
== NULL
|| unsent_data
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1528 if (!(m
->m_pkthdr
.pkt_flags
& PKTF_VALID_UNSENT_DATA
))
1531 *unsent_data
= m
->m_pkthdr
.bufstatus_if
+
1532 m
->m_pkthdr
.bufstatus_sndbuf
;
1537 mbuf_get_buffer_status(const mbuf_t m
, mbuf_buffer_status_t
*buf_status
)
1539 if (m
== NULL
|| buf_status
== NULL
|| !(m
->m_flags
& M_PKTHDR
) ||
1540 !(m
->m_pkthdr
.pkt_flags
& PKTF_VALID_UNSENT_DATA
))
1543 buf_status
->buf_interface
= m
->m_pkthdr
.bufstatus_if
;
1544 buf_status
->buf_sndbuf
= m
->m_pkthdr
.bufstatus_sndbuf
;
1549 mbuf_pkt_new_flow(const mbuf_t m
, u_int32_t
*retval
)
1551 if (m
== NULL
|| retval
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1553 if (m
->m_pkthdr
.pkt_flags
& PKTF_NEW_FLOW
)
1561 mbuf_last_pkt(const mbuf_t m
, u_int32_t
*retval
)
1563 if (m
== NULL
|| retval
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1565 if (m
->m_pkthdr
.pkt_flags
& PKTF_LAST_PKT
)
1573 mbuf_get_timestamp(mbuf_t m
, u_int64_t
*ts
, boolean_t
*valid
)
1575 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || ts
== NULL
)
1578 if ((m
->m_pkthdr
.pkt_flags
& PKTF_TS_VALID
) == 0) {
1585 *ts
= m
->m_pkthdr
.pkt_timestamp
;
1591 mbuf_set_timestamp(mbuf_t m
, u_int64_t ts
, boolean_t valid
)
1593 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1596 if (valid
== FALSE
) {
1597 m
->m_pkthdr
.pkt_flags
&= ~PKTF_TS_VALID
;
1598 m
->m_pkthdr
.pkt_timestamp
= 0;
1600 m
->m_pkthdr
.pkt_flags
|= PKTF_TS_VALID
;
1601 m
->m_pkthdr
.pkt_timestamp
= ts
;
1607 mbuf_get_status(mbuf_t m
, kern_return_t
*status
)
1609 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || status
== NULL
)
1612 if ((m
->m_pkthdr
.pkt_flags
& PKTF_DRIVER_MTAG
) == 0) {
1615 *status
= m
->m_pkthdr
.drv_tx_status
;
1621 driver_mtag_init(mbuf_t m
)
1623 if ((m
->m_pkthdr
.pkt_flags
& PKTF_DRIVER_MTAG
) == 0) {
1624 m
->m_pkthdr
.pkt_flags
|= PKTF_DRIVER_MTAG
;
1625 bzero(&m
->m_pkthdr
.driver_mtag
,
1626 sizeof(m
->m_pkthdr
.driver_mtag
));
1631 mbuf_set_status(mbuf_t m
, kern_return_t status
)
1633 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1636 driver_mtag_init(m
);
1638 m
->m_pkthdr
.drv_tx_status
= status
;
1644 mbuf_get_flowid(mbuf_t m
, u_int16_t
*flowid
)
1646 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || flowid
== NULL
)
1649 if ((m
->m_pkthdr
.pkt_flags
& PKTF_DRIVER_MTAG
) == 0) {
1652 *flowid
= m
->m_pkthdr
.drv_flowid
;
1658 mbuf_set_flowid(mbuf_t m
, u_int16_t flowid
)
1660 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1663 driver_mtag_init(m
);
1665 m
->m_pkthdr
.drv_flowid
= flowid
;
1671 mbuf_get_tx_compl_data(mbuf_t m
, uintptr_t *arg
, uintptr_t *data
)
1673 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || arg
== NULL
||
1677 if ((m
->m_pkthdr
.pkt_flags
& PKTF_DRIVER_MTAG
) == 0) {
1681 *arg
= m
->m_pkthdr
.drv_tx_compl_arg
;
1682 *data
= m
->m_pkthdr
.drv_tx_compl_data
;
1688 mbuf_set_tx_compl_data(mbuf_t m
, uintptr_t arg
, uintptr_t data
)
1690 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1693 driver_mtag_init(m
);
1695 m
->m_pkthdr
.drv_tx_compl_arg
= arg
;
1696 m
->m_pkthdr
.drv_tx_compl_data
= data
;
1702 get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback
)
1706 for (i
= 0; i
< MAX_MBUF_TX_COMPL_FUNC
; i
++) {
1707 if (mbuf_tx_compl_table
[i
] == callback
) {
1711 return (UINT32_MAX
);
1715 get_tx_compl_callback_index(mbuf_tx_compl_func callback
)
1719 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock
);
1721 i
= get_tx_compl_callback_index_locked(callback
);
1723 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock
);
1729 mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback
)
1734 if (callback
== NULL
)
1737 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock
);
1739 i
= get_tx_compl_callback_index_locked(callback
);
1745 /* assume the worst */
1747 for (i
= 0; i
< MAX_MBUF_TX_COMPL_FUNC
; i
++) {
1748 if (mbuf_tx_compl_table
[i
] == NULL
) {
1749 mbuf_tx_compl_table
[i
] = callback
;
1755 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock
);
1761 mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback
)
1766 if (callback
== NULL
)
1769 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock
);
1771 /* assume the worst */
1773 for (i
= 0; i
< MAX_MBUF_TX_COMPL_FUNC
; i
++) {
1774 if (mbuf_tx_compl_table
[i
] == callback
) {
1775 mbuf_tx_compl_table
[i
] = NULL
;
1781 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock
);
1787 mbuf_get_timestamp_requested(mbuf_t m
, boolean_t
*requested
)
1789 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1792 if ((m
->m_pkthdr
.pkt_flags
& PKTF_TX_COMPL_TS_REQ
) == 0) {
1801 mbuf_set_timestamp_requested(mbuf_t m
, uintptr_t *pktid
,
1802 mbuf_tx_compl_func callback
)
1806 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || callback
== NULL
||
1810 i
= get_tx_compl_callback_index(callback
);
1811 if (i
== UINT32_MAX
)
1814 #if (DEBUG || DEVELOPMENT)
1815 VERIFY(i
< sizeof(m
->m_pkthdr
.pkt_compl_callbacks
));
1816 #endif /* (DEBUG || DEVELOPMENT) */
1818 if ((m
->m_pkthdr
.pkt_flags
& PKTF_TX_COMPL_TS_REQ
) == 0) {
1819 m
->m_pkthdr
.pkt_compl_callbacks
= 0;
1820 m
->m_pkthdr
.pkt_flags
|= PKTF_TX_COMPL_TS_REQ
;
1821 m
->m_pkthdr
.pkt_compl_context
=
1822 atomic_add_32_ov(&mbuf_tx_compl_index
, 1);
1824 #if (DEBUG || DEVELOPMENT)
1825 if (mbuf_tx_compl_debug
!= 0) {
1826 OSIncrementAtomic64(&mbuf_tx_compl_outstanding
);
1828 #endif /* (DEBUG || DEVELOPMENT) */
1830 m
->m_pkthdr
.pkt_compl_callbacks
|= (1 << i
);
1831 *pktid
= m
->m_pkthdr
.pkt_compl_context
;
1837 m_do_tx_compl_callback(struct mbuf
*m
, struct ifnet
*ifp
)
1844 if ((m
->m_pkthdr
.pkt_flags
& PKTF_TX_COMPL_TS_REQ
) == 0)
1847 #if (DEBUG || DEVELOPMENT)
1848 if (mbuf_tx_compl_debug
!= 0 && ifp
!= NULL
&&
1849 (ifp
->if_xflags
& IFXF_TIMESTAMP_ENABLED
) != 0 &&
1850 (m
->m_pkthdr
.pkt_flags
& PKTF_TS_VALID
) == 0) {
1851 struct timespec now
;
1854 net_timernsec(&now
, &m
->m_pkthdr
.pkt_timestamp
);
1856 #endif /* (DEBUG || DEVELOPMENT) */
1858 for (i
= 0; i
< MAX_MBUF_TX_COMPL_FUNC
; i
++) {
1859 mbuf_tx_compl_func callback
;
1861 if ((m
->m_pkthdr
.pkt_compl_callbacks
& (1 << i
)) == 0)
1864 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock
);
1865 callback
= mbuf_tx_compl_table
[i
];
1866 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock
);
1868 if (callback
!= NULL
) {
1869 callback(m
->m_pkthdr
.pkt_compl_context
,
1871 (m
->m_pkthdr
.pkt_flags
& PKTF_TS_VALID
) ?
1872 m
->m_pkthdr
.pkt_timestamp
: 0,
1873 m
->m_pkthdr
.drv_tx_compl_arg
,
1874 m
->m_pkthdr
.drv_tx_compl_data
,
1875 m
->m_pkthdr
.drv_tx_status
);
1878 m
->m_pkthdr
.pkt_compl_callbacks
= 0;
1880 #if (DEBUG || DEVELOPMENT)
1881 if (mbuf_tx_compl_debug
!= 0) {
1882 OSDecrementAtomic64(&mbuf_tx_compl_outstanding
);
1884 atomic_add_64(&mbuf_tx_compl_aborted
, 1);
1886 #endif /* (DEBUG || DEVELOPMENT) */