2 * Copyright (c) 2004-2014 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 //#include <sys/kpi_interface.h>
32 #include <sys/param.h>
34 #include <sys/mcache.h>
35 #include <sys/socket.h>
36 #include <kern/debug.h>
37 #include <libkern/OSAtomic.h>
38 #include <kern/kalloc.h>
40 #include <netinet/in.h>
41 #include <netinet/ip_var.h>
43 #include "net/net_str_id.h"
45 /* mbuf flags visible to KPI clients; do not add private flags here */
46 static const mbuf_flags_t mbuf_flags_mask
= (MBUF_EXT
| MBUF_PKTHDR
| MBUF_EOR
|
47 MBUF_LOOP
| MBUF_BCAST
| MBUF_MCAST
| MBUF_FRAG
| MBUF_FIRSTFRAG
|
48 MBUF_LASTFRAG
| MBUF_PROMISC
| MBUF_HASFCS
);
50 /* Unalterable mbuf flags */
51 static const mbuf_flags_t mbuf_cflags_mask
= (MBUF_EXT
);
53 void* mbuf_data(mbuf_t mbuf
)
58 void* mbuf_datastart(mbuf_t mbuf
)
60 if (mbuf
->m_flags
& M_EXT
)
61 return mbuf
->m_ext
.ext_buf
;
62 if (mbuf
->m_flags
& M_PKTHDR
)
63 return mbuf
->m_pktdat
;
67 errno_t
mbuf_setdata(mbuf_t mbuf
, void* data
, size_t len
)
69 size_t start
= (size_t)((char*)mbuf_datastart(mbuf
));
70 size_t maxlen
= mbuf_maxlen(mbuf
);
72 if ((size_t)data
< start
|| ((size_t)data
) + len
> start
+ maxlen
)
80 errno_t
mbuf_align_32(mbuf_t mbuf
, size_t len
)
82 if ((mbuf
->m_flags
& M_EXT
) != 0 && m_mclhasreference(mbuf
))
84 mbuf
->m_data
= mbuf_datastart(mbuf
);
85 mbuf
->m_data
+= ((mbuf_trailingspace(mbuf
) - len
) &~ (sizeof(u_int32_t
) - 1));
90 /* This function is used to provide mcl_to_paddr via symbol indirection,
91 * please avoid any change in behavior or remove the indirection in
94 addr64_t
mbuf_data_to_physical(void* ptr
)
96 return ((addr64_t
)mcl_to_paddr(ptr
));
99 errno_t
mbuf_get(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
101 /* Must set *mbuf to NULL in failure case */
102 *mbuf
= m_get(how
, type
);
104 return (*mbuf
== NULL
) ? ENOMEM
: 0;
107 errno_t
mbuf_gethdr(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
109 /* Must set *mbuf to NULL in failure case */
110 *mbuf
= m_gethdr(how
, type
);
112 return (*mbuf
== NULL
) ? ENOMEM
: 0;
116 mbuf_attachcluster(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
,
117 caddr_t extbuf
, void (*extfree
)(caddr_t
, u_int
, caddr_t
),
118 size_t extsize
, caddr_t extarg
)
120 if (mbuf
== NULL
|| extbuf
== NULL
|| extfree
== NULL
|| extsize
== 0)
123 if ((*mbuf
= m_clattach(*mbuf
, type
, extbuf
,
124 extfree
, extsize
, extarg
, how
)) == NULL
)
131 mbuf_alloccluster(mbuf_how_t how
, size_t *size
, caddr_t
*addr
)
133 if (size
== NULL
|| *size
== 0 || addr
== NULL
)
138 /* Jumbo cluster pool not available? */
139 if (*size
> MBIGCLBYTES
&& njcl
== 0)
142 if (*size
<= MCLBYTES
&& (*addr
= m_mclalloc(how
)) != NULL
)
144 else if (*size
> MCLBYTES
&& *size
<= MBIGCLBYTES
&&
145 (*addr
= m_bigalloc(how
)) != NULL
)
147 else if (*size
> MBIGCLBYTES
&& *size
<= M16KCLBYTES
&&
148 (*addr
= m_16kalloc(how
)) != NULL
)
160 mbuf_freecluster(caddr_t addr
, size_t size
)
162 if (size
!= MCLBYTES
&& size
!= MBIGCLBYTES
&& size
!= M16KCLBYTES
)
163 panic("%s: invalid size (%ld) for cluster %p", __func__
,
166 if (size
== MCLBYTES
)
168 else if (size
== MBIGCLBYTES
)
169 m_bigfree(addr
, MBIGCLBYTES
, NULL
);
171 m_16kfree(addr
, M16KCLBYTES
, NULL
);
173 panic("%s: freeing jumbo cluster to an empty pool", __func__
);
177 mbuf_getcluster(mbuf_how_t how
, mbuf_type_t type
, size_t size
, mbuf_t
* mbuf
)
179 /* Must set *mbuf to NULL in failure case */
186 *mbuf
= m_get(how
, type
);
192 * At the time this code was written, m_{mclget,mbigget,m16kget}
193 * would always return the same value that was passed in to it.
195 if (size
== MCLBYTES
) {
196 *mbuf
= m_mclget(*mbuf
, how
);
197 } else if (size
== MBIGCLBYTES
) {
198 *mbuf
= m_mbigget(*mbuf
, how
);
199 } else if (size
== M16KCLBYTES
) {
201 *mbuf
= m_m16kget(*mbuf
, how
);
203 /* Jumbo cluster pool not available? */
211 if (*mbuf
== NULL
|| ((*mbuf
)->m_flags
& M_EXT
) == 0)
214 if (created
&& error
!= 0) {
221 errno_t
mbuf_mclget(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
223 /* Must set *mbuf to NULL in failure case */
226 if (mbuf
== NULL
) return EINVAL
;
228 error
= mbuf_get(how
, type
, mbuf
);
235 * At the time this code was written, m_mclget would always
236 * return the same value that was passed in to it.
238 *mbuf
= m_mclget(*mbuf
, how
);
240 if (created
&& ((*mbuf
)->m_flags
& M_EXT
) == 0) {
244 if (*mbuf
== NULL
|| ((*mbuf
)->m_flags
& M_EXT
) == 0)
250 errno_t
mbuf_getpacket(mbuf_how_t how
, mbuf_t
*mbuf
)
252 /* Must set *mbuf to NULL in failure case */
255 *mbuf
= m_getpacket_how(how
);
258 if (how
== MBUF_WAITOK
)
267 /* This function is used to provide m_free via symbol indirection, please avoid
268 * any change in behavior or remove the indirection in config/Unsupported*
270 mbuf_t
mbuf_free(mbuf_t mbuf
)
275 /* This function is used to provide m_freem via symbol indirection, please avoid
276 * any change in behavior or remove the indirection in config/Unsupported*
278 void mbuf_freem(mbuf_t mbuf
)
283 int mbuf_freem_list(mbuf_t mbuf
)
285 return m_freem_list(mbuf
);
288 size_t mbuf_leadingspace(const mbuf_t mbuf
)
290 return m_leadingspace(mbuf
);
293 /* This function is used to provide m_trailingspace via symbol indirection,
294 * please avoid any change in behavior or remove the indirection in
295 * config/Unsupported*
297 size_t mbuf_trailingspace(const mbuf_t mbuf
)
299 return m_trailingspace(mbuf
);
303 errno_t
mbuf_copym(const mbuf_t src
, size_t offset
, size_t len
,
304 mbuf_how_t how
, mbuf_t
*new_mbuf
)
306 /* Must set *mbuf to NULL in failure case */
307 *new_mbuf
= m_copym(src
, offset
, len
, how
);
309 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
312 errno_t
mbuf_dup(const mbuf_t src
, mbuf_how_t how
, mbuf_t
*new_mbuf
)
314 /* Must set *new_mbuf to NULL in failure case */
315 *new_mbuf
= m_dup(src
, how
);
317 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
320 errno_t
mbuf_prepend(mbuf_t
*orig
, size_t len
, mbuf_how_t how
)
322 /* Must set *orig to NULL in failure case */
323 *orig
= m_prepend_2(*orig
, len
, how
);
325 return (*orig
== NULL
) ? ENOMEM
: 0;
328 errno_t
mbuf_split(mbuf_t src
, size_t offset
,
329 mbuf_how_t how
, mbuf_t
*new_mbuf
)
331 /* Must set *new_mbuf to NULL in failure case */
332 *new_mbuf
= m_split(src
, offset
, how
);
334 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
337 errno_t
mbuf_pullup(mbuf_t
*mbuf
, size_t len
)
339 /* Must set *mbuf to NULL in failure case */
340 *mbuf
= m_pullup(*mbuf
, len
);
342 return (*mbuf
== NULL
) ? ENOMEM
: 0;
345 errno_t
mbuf_pulldown(mbuf_t src
, size_t *offset
, size_t len
, mbuf_t
*location
)
347 /* Must set *location to NULL in failure case */
349 *location
= m_pulldown(src
, *offset
, len
, &new_offset
);
350 *offset
= new_offset
;
352 return (*location
== NULL
) ? ENOMEM
: 0;
355 /* This function is used to provide m_adj via symbol indirection, please avoid
356 * any change in behavior or remove the indirection in config/Unsupported*
358 void mbuf_adj(mbuf_t mbuf
, int len
)
363 errno_t
mbuf_adjustlen(mbuf_t m
, int amount
)
365 /* Verify m_len will be valid after adding amount */
367 int used
= (size_t)mbuf_data(m
) - (size_t)mbuf_datastart(m
) +
370 if ((size_t)(amount
+ used
) > mbuf_maxlen(m
))
373 else if (-amount
> m
->m_len
) {
382 mbuf_concatenate(mbuf_t dst
, mbuf_t src
)
389 /* return dst as is in the current implementation */
392 errno_t
mbuf_copydata(const mbuf_t m0
, size_t off
, size_t len
, void* out_data
)
394 /* Copied m_copydata, added error handling (don't just panic) */
401 if (off
< (size_t)m
->m_len
)
409 count
= m
->m_len
- off
> len
? len
: m
->m_len
- off
;
410 bcopy(mtod(m
, caddr_t
) + off
, out_data
, count
);
412 out_data
= ((char*)out_data
) + count
;
420 int mbuf_mclhasreference(mbuf_t mbuf
)
422 if ((mbuf
->m_flags
& M_EXT
))
423 return m_mclhasreference(mbuf
);
430 mbuf_t
mbuf_next(const mbuf_t mbuf
)
435 errno_t
mbuf_setnext(mbuf_t mbuf
, mbuf_t next
)
437 if (next
&& ((next
)->m_nextpkt
!= NULL
||
438 (next
)->m_type
== MT_FREE
)) return EINVAL
;
444 mbuf_t
mbuf_nextpkt(const mbuf_t mbuf
)
446 return mbuf
->m_nextpkt
;
449 void mbuf_setnextpkt(mbuf_t mbuf
, mbuf_t nextpkt
)
451 mbuf
->m_nextpkt
= nextpkt
;
454 size_t mbuf_len(const mbuf_t mbuf
)
459 void mbuf_setlen(mbuf_t mbuf
, size_t len
)
464 size_t mbuf_maxlen(const mbuf_t mbuf
)
466 if (mbuf
->m_flags
& M_EXT
)
467 return mbuf
->m_ext
.ext_size
;
468 return &mbuf
->m_dat
[MLEN
] - ((char*)mbuf_datastart(mbuf
));
471 mbuf_type_t
mbuf_type(const mbuf_t mbuf
)
476 errno_t
mbuf_settype(mbuf_t mbuf
, mbuf_type_t new_type
)
478 if (new_type
== MBUF_TYPE_FREE
) return EINVAL
;
480 m_mchtype(mbuf
, new_type
);
486 mbuf_flags(const mbuf_t mbuf
)
488 return (mbuf
->m_flags
& mbuf_flags_mask
);
492 mbuf_setflags(mbuf_t mbuf
, mbuf_flags_t flags
)
495 mbuf_flags_t oflags
= mbuf
->m_flags
;
498 * 1. Return error if public but un-alterable flags are changed
500 * 2. Return error if bits other than public flags are set in passed
502 * Please note that private flag bits must be passed as reset by kexts,
503 * as they must use mbuf_flags KPI to get current set of mbuf flags
504 * and mbuf_flags KPI does not expose private flags.
506 if ((flags
^ oflags
) & mbuf_cflags_mask
) {
508 } else if (flags
& ~mbuf_flags_mask
) {
511 mbuf
->m_flags
= flags
| (mbuf
->m_flags
& ~mbuf_flags_mask
);
513 * If M_PKTHDR bit has changed, we have work to do;
514 * m_reinit() will take care of setting/clearing the
515 * bit, as well as the rest of bookkeeping.
517 if ((oflags
^ mbuf
->m_flags
) & M_PKTHDR
) {
518 mbuf
->m_flags
^= M_PKTHDR
; /* restore */
520 (mbuf
->m_flags
& M_PKTHDR
) ? 0 : 1);
528 mbuf_setflags_mask(mbuf_t mbuf
, mbuf_flags_t flags
, mbuf_flags_t mask
)
532 if (mask
& (~mbuf_flags_mask
| mbuf_cflags_mask
)) {
535 mbuf_flags_t oflags
= mbuf
->m_flags
;
536 mbuf
->m_flags
= (flags
& mask
) | (mbuf
->m_flags
& ~mask
);
538 * If M_PKTHDR bit has changed, we have work to do;
539 * m_reinit() will take care of setting/clearing the
540 * bit, as well as the rest of bookkeeping.
542 if ((oflags
^ mbuf
->m_flags
) & M_PKTHDR
) {
543 mbuf
->m_flags
^= M_PKTHDR
; /* restore */
545 (mbuf
->m_flags
& M_PKTHDR
) ? 0 : 1);
552 errno_t
mbuf_copy_pkthdr(mbuf_t dest
, const mbuf_t src
)
554 if (((src
)->m_flags
& M_PKTHDR
) == 0)
557 m_copy_pkthdr(dest
, src
);
562 size_t mbuf_pkthdr_len(const mbuf_t mbuf
)
564 return mbuf
->m_pkthdr
.len
;
567 __private_extern__
size_t mbuf_pkthdr_maxlen(mbuf_t m
)
573 maxlen
+= mbuf_maxlen(n
);
579 void mbuf_pkthdr_setlen(mbuf_t mbuf
, size_t len
)
581 mbuf
->m_pkthdr
.len
= len
;
584 void mbuf_pkthdr_adjustlen(mbuf_t mbuf
, int amount
)
586 mbuf
->m_pkthdr
.len
+= amount
;
589 ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf
)
591 // If we reference count ifnets, we should take a reference here before returning
592 return mbuf
->m_pkthdr
.rcvif
;
595 errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf
, ifnet_t ifnet
)
597 /* May want to walk ifnet list to determine if interface is valid */
598 mbuf
->m_pkthdr
.rcvif
= (struct ifnet
*)ifnet
;
602 void* mbuf_pkthdr_header(const mbuf_t mbuf
)
604 return mbuf
->m_pkthdr
.pkt_hdr
;
607 void mbuf_pkthdr_setheader(mbuf_t mbuf
, void *header
)
609 mbuf
->m_pkthdr
.pkt_hdr
= (void*)header
;
613 mbuf_inbound_modified(mbuf_t mbuf
)
615 /* Invalidate hardware generated checksum flags */
616 mbuf
->m_pkthdr
.csum_flags
= 0;
620 mbuf_outbound_finalize(struct mbuf
*m
, u_int32_t pf
, size_t o
)
622 /* Generate the packet in software, client needs it */
625 (void) in_finalize_cksum(m
, o
, m
->m_pkthdr
.csum_flags
);
631 * Checksum offload should not have been enabled when
632 * extension headers exist; indicate that the callee
633 * should skip such case by setting optlen to -1.
635 (void) in6_finalize_cksum(m
, o
, -1, -1, m
->m_pkthdr
.csum_flags
);
649 mbuf
->m_pkthdr
.csum_flags
|= CSUM_VLAN_TAG_VALID
;
650 mbuf
->m_pkthdr
.vlan_tag
= vlan
;
660 if ((mbuf
->m_pkthdr
.csum_flags
& CSUM_VLAN_TAG_VALID
) == 0)
661 return ENXIO
; // No vlan tag set
663 *vlan
= mbuf
->m_pkthdr
.vlan_tag
;
672 mbuf
->m_pkthdr
.csum_flags
&= ~CSUM_VLAN_TAG_VALID
;
673 mbuf
->m_pkthdr
.vlan_tag
= 0;
678 static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags
=
679 MBUF_CSUM_REQ_IP
| MBUF_CSUM_REQ_TCP
| MBUF_CSUM_REQ_UDP
|
680 MBUF_CSUM_PARTIAL
| MBUF_CSUM_REQ_TCPIPV6
| MBUF_CSUM_REQ_UDPIPV6
;
683 mbuf_set_csum_requested(
685 mbuf_csum_request_flags_t request
,
688 request
&= mbuf_valid_csum_request_flags
;
689 mbuf
->m_pkthdr
.csum_flags
= (mbuf
->m_pkthdr
.csum_flags
& 0xffff0000) | request
;
690 mbuf
->m_pkthdr
.csum_data
= value
;
695 static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags
=
696 MBUF_TSO_IPV4
| MBUF_TSO_IPV6
;
699 mbuf_get_tso_requested(
701 mbuf_tso_request_flags_t
*request
,
704 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 ||
705 request
== NULL
|| value
== NULL
)
708 *request
= mbuf
->m_pkthdr
.csum_flags
;
709 *request
&= mbuf_valid_tso_request_flags
;
710 if (*request
&& value
!= NULL
)
711 *value
= mbuf
->m_pkthdr
.tso_segsz
;
717 mbuf_get_csum_requested(
719 mbuf_csum_request_flags_t
*request
,
722 *request
= mbuf
->m_pkthdr
.csum_flags
;
723 *request
&= mbuf_valid_csum_request_flags
;
725 *value
= mbuf
->m_pkthdr
.csum_data
;
732 mbuf_clear_csum_requested(
735 mbuf
->m_pkthdr
.csum_flags
&= 0xffff0000;
736 mbuf
->m_pkthdr
.csum_data
= 0;
741 static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags
=
742 MBUF_CSUM_DID_IP
| MBUF_CSUM_IP_GOOD
| MBUF_CSUM_DID_DATA
|
743 MBUF_CSUM_PSEUDO_HDR
| MBUF_CSUM_PARTIAL
;
746 mbuf_set_csum_performed(
748 mbuf_csum_performed_flags_t performed
,
751 performed
&= mbuf_valid_csum_performed_flags
;
752 mbuf
->m_pkthdr
.csum_flags
= (mbuf
->m_pkthdr
.csum_flags
& 0xffff0000) | performed
;
753 mbuf
->m_pkthdr
.csum_data
= value
;
759 mbuf_get_csum_performed(
761 mbuf_csum_performed_flags_t
*performed
,
764 *performed
= mbuf
->m_pkthdr
.csum_flags
& mbuf_valid_csum_performed_flags
;
765 *value
= mbuf
->m_pkthdr
.csum_data
;
771 mbuf_clear_csum_performed(
774 mbuf
->m_pkthdr
.csum_flags
&= 0xffff0000;
775 mbuf
->m_pkthdr
.csum_data
= 0;
781 mbuf_inet_cksum(mbuf_t mbuf
, int protocol
, u_int32_t offset
, u_int32_t length
,
784 if (mbuf
== NULL
|| length
== 0 || csum
== NULL
||
785 (u_int32_t
)mbuf
->m_pkthdr
.len
< (offset
+ length
))
788 *csum
= inet_cksum(mbuf
, protocol
, offset
, length
);
794 mbuf_inet6_cksum(mbuf_t mbuf
, int protocol
, u_int32_t offset
, u_int32_t length
,
797 if (mbuf
== NULL
|| length
== 0 || csum
== NULL
||
798 (u_int32_t
)mbuf
->m_pkthdr
.len
< (offset
+ length
))
801 *csum
= inet6_cksum(mbuf
, protocol
, offset
, length
);
806 mbuf_inet6_cksum(__unused mbuf_t mbuf
, __unused
int protocol
,
807 __unused u_int32_t offset
, __unused u_int32_t length
,
808 __unused u_int16_t
*csum
)
810 panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
815 inet6_cksum(__unused
struct mbuf
*m
, __unused
unsigned int nxt
,
816 __unused
unsigned int off
, __unused
unsigned int len
)
818 panic("inet6_cksum() doesn't exist on this platform\n");
822 void nd6_lookup_ipv6(void);
824 nd6_lookup_ipv6(void)
826 panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
830 in6addr_local(__unused
struct in6_addr
*a
)
832 panic("in6addr_local() doesn't exist on this platform\n");
836 void nd6_storelladdr(void);
838 nd6_storelladdr(void)
840 panic("nd6_storelladdr() doesn't exist on this platform\n");
848 #define MTAG_FIRST_ID FIRST_KPI_STR_ID
853 mbuf_tag_id_t
*out_id
)
855 return net_str_id_find_internal(string
, out_id
, NSI_MBUF_TAG
, 1);
862 mbuf_tag_type_t type
,
868 u_int32_t mtag_id_first
, mtag_id_last
;
873 /* Sanity check parameters */
874 (void) net_str_id_first_last(&mtag_id_first
, &mtag_id_last
, NSI_MBUF_TAG
);
875 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 || id
< mtag_id_first
||
876 id
> mtag_id_last
|| length
< 1 || (length
& 0xffff0000) != 0 ||
881 /* Make sure this mtag hasn't already been allocated */
882 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
887 /* Allocate an mtag */
888 tag
= m_tag_create(id
, type
, length
, how
, mbuf
);
890 return how
== M_WAITOK
? ENOMEM
: EWOULDBLOCK
;
893 /* Attach the mtag and set *data_p */
894 m_tag_prepend(mbuf
, tag
);
904 mbuf_tag_type_t type
,
909 u_int32_t mtag_id_first
, mtag_id_last
;
916 /* Sanity check parameters */
917 (void) net_str_id_first_last(&mtag_id_first
, &mtag_id_last
, NSI_MBUF_TAG
);
918 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 || id
< mtag_id_first
||
919 id
> mtag_id_last
|| length
== NULL
|| data_p
== NULL
) {
924 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
929 /* Copy out the pointer to the data and the length value */
930 *length
= tag
->m_tag_len
;
940 mbuf_tag_type_t type
)
943 u_int32_t mtag_id_first
, mtag_id_last
;
945 /* Sanity check parameters */
946 (void) net_str_id_first_last(&mtag_id_first
, &mtag_id_last
, NSI_MBUF_TAG
);
947 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 || id
< mtag_id_first
||
951 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
956 m_tag_delete(mbuf
, tag
);
961 * Maximum length of driver auxiliary data; keep this small to
962 * fit in a single mbuf to avoid wasting memory, rounded down to
963 * the nearest 64-bit boundary. This takes into account mbuf
964 * tag-related (m_taghdr + m_tag) as well m_drvaux_tag structs.
966 #define MBUF_DRVAUX_MAXLEN \
967 P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) - \
968 M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
971 mbuf_add_drvaux(mbuf_t mbuf
, mbuf_how_t how
, u_int32_t family
,
972 u_int32_t subfamily
, size_t length
, void **data_p
)
974 struct m_drvaux_tag
*p
;
977 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_PKTHDR
) ||
978 length
== 0 || length
> MBUF_DRVAUX_MAXLEN
)
984 /* Check if one is already associated */
985 if ((tag
= m_tag_locate(mbuf
, KERNEL_MODULE_TAG_ID
,
986 KERNEL_TAG_TYPE_DRVAUX
, NULL
)) != NULL
)
989 /* Tag is (m_drvaux_tag + module specific data) */
990 if ((tag
= m_tag_create(KERNEL_MODULE_TAG_ID
, KERNEL_TAG_TYPE_DRVAUX
,
991 sizeof (*p
) + length
, how
, mbuf
)) == NULL
)
992 return ((how
== MBUF_WAITOK
) ? ENOMEM
: EWOULDBLOCK
);
994 p
= (struct m_drvaux_tag
*)(tag
+ 1);
995 p
->da_family
= family
;
996 p
->da_subfamily
= subfamily
;
997 p
->da_length
= length
;
999 /* Associate the tag */
1000 m_tag_prepend(mbuf
, tag
);
1009 mbuf_find_drvaux(mbuf_t mbuf
, u_int32_t
*family_p
, u_int32_t
*subfamily_p
,
1010 u_int32_t
*length_p
, void **data_p
)
1012 struct m_drvaux_tag
*p
;
1015 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_PKTHDR
) || data_p
== NULL
)
1020 if ((tag
= m_tag_locate(mbuf
, KERNEL_MODULE_TAG_ID
,
1021 KERNEL_TAG_TYPE_DRVAUX
, NULL
)) == NULL
)
1024 /* Must be at least size of m_drvaux_tag */
1025 VERIFY(tag
->m_tag_len
>= sizeof (*p
));
1027 p
= (struct m_drvaux_tag
*)(tag
+ 1);
1028 VERIFY(p
->da_length
> 0 && p
->da_length
<= MBUF_DRVAUX_MAXLEN
);
1030 if (family_p
!= NULL
)
1031 *family_p
= p
->da_family
;
1032 if (subfamily_p
!= NULL
)
1033 *subfamily_p
= p
->da_subfamily
;
1034 if (length_p
!= NULL
)
1035 *length_p
= p
->da_length
;
1043 mbuf_del_drvaux(mbuf_t mbuf
)
1047 if (mbuf
== NULL
|| !(mbuf
->m_flags
& M_PKTHDR
))
1050 if ((tag
= m_tag_locate(mbuf
, KERNEL_MODULE_TAG_ID
,
1051 KERNEL_TAG_TYPE_DRVAUX
, NULL
)) != NULL
)
1052 m_tag_delete(mbuf
, tag
);
1056 void mbuf_stats(struct mbuf_stat
*stats
)
1058 stats
->mbufs
= mbstat
.m_mbufs
;
1059 stats
->clusters
= mbstat
.m_clusters
;
1060 stats
->clfree
= mbstat
.m_clfree
;
1061 stats
->drops
= mbstat
.m_drops
;
1062 stats
->wait
= mbstat
.m_wait
;
1063 stats
->drain
= mbstat
.m_drain
;
1064 __builtin_memcpy(stats
->mtypes
, mbstat
.m_mtypes
, sizeof(stats
->mtypes
));
1065 stats
->mcfail
= mbstat
.m_mcfail
;
1066 stats
->mpfail
= mbstat
.m_mpfail
;
1067 stats
->msize
= mbstat
.m_msize
;
1068 stats
->mclbytes
= mbstat
.m_mclbytes
;
1069 stats
->minclsize
= mbstat
.m_minclsize
;
1070 stats
->mlen
= mbstat
.m_mlen
;
1071 stats
->mhlen
= mbstat
.m_mhlen
;
1072 stats
->bigclusters
= mbstat
.m_bigclusters
;
1073 stats
->bigclfree
= mbstat
.m_bigclfree
;
1074 stats
->bigmclbytes
= mbstat
.m_bigmclbytes
;
1078 mbuf_allocpacket(mbuf_how_t how
, size_t packetlen
, unsigned int *maxchunks
, mbuf_t
*mbuf
)
1082 unsigned int numpkts
= 1;
1083 unsigned int numchunks
= maxchunks
? *maxchunks
: 0;
1085 if (packetlen
== 0) {
1089 m
= m_allocpacket_internal(&numpkts
, packetlen
, maxchunks
? &numchunks
: NULL
, how
, 1, 0);
1091 if (maxchunks
&& *maxchunks
&& numchunks
> *maxchunks
)
1097 *maxchunks
= numchunks
;
1106 mbuf_allocpacket_list(unsigned int numpkts
, mbuf_how_t how
, size_t packetlen
, unsigned int *maxchunks
, mbuf_t
*mbuf
)
1110 unsigned int numchunks
= maxchunks
? *maxchunks
: 0;
1116 if (packetlen
== 0) {
1120 m
= m_allocpacket_internal(&numpkts
, packetlen
, maxchunks
? &numchunks
: NULL
, how
, 1, 0);
1122 if (maxchunks
&& *maxchunks
&& numchunks
> *maxchunks
)
1128 *maxchunks
= numchunks
;
1136 __private_extern__
size_t
1137 mbuf_pkt_list_len(mbuf_t m
)
1143 len
+= mbuf_pkthdr_len(n
);
1144 n
= mbuf_nextpkt(n
);
1149 __private_extern__
size_t
1150 mbuf_pkt_list_maxlen(mbuf_t m
)
1156 maxlen
+= mbuf_pkthdr_maxlen(n
);
1157 n
= mbuf_nextpkt(n
);
1163 * mbuf_copyback differs from m_copyback in a few ways:
1164 * 1) mbuf_copyback will allocate clusters for new mbufs we append
1165 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
1166 * 3) mbuf_copyback reports whether or not the operation succeeded
1167 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
1182 const char *cp
= data
;
1184 if (m
== NULL
|| len
== 0 || data
== NULL
)
1187 while (off
> (mlen
= m
->m_len
)) {
1190 if (m
->m_next
== 0) {
1191 n
= m_getclr(how
, m
->m_type
);
1196 n
->m_len
= MIN(MLEN
, len
+ off
);
1203 mlen
= MIN(m
->m_len
- off
, len
);
1204 if (mlen
< len
&& m
->m_next
== NULL
&& mbuf_trailingspace(m
) > 0) {
1205 size_t grow
= MIN(mbuf_trailingspace(m
), len
- mlen
);
1209 bcopy(cp
, off
+ (char*)mbuf_data(m
), (unsigned)mlen
);
1217 if (m
->m_next
== 0) {
1218 n
= m_get(how
, m
->m_type
);
1223 if (len
> MINCLSIZE
) {
1224 /* cluster allocation failure is okay, we can grow chain */
1225 mbuf_mclget(how
, m
->m_type
, &n
);
1227 n
->m_len
= MIN(mbuf_maxlen(n
), len
);
1234 if ((m_start
->m_flags
& M_PKTHDR
) && (m_start
->m_pkthdr
.len
< totlen
))
1235 m_start
->m_pkthdr
.len
= totlen
;
1247 mbuf_get_mhlen(void)
1253 mbuf_get_minclsize(void)
1255 return (MHLEN
+ MLEN
);
1259 mbuf_get_traffic_class_max_count(void)
1261 return (MBUF_TC_MAX
);
1265 mbuf_get_traffic_class_index(mbuf_traffic_class_t tc
, u_int32_t
*index
)
1267 if (index
== NULL
|| (u_int32_t
)tc
>= MBUF_TC_MAX
)
1270 *index
= MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc
)));
1274 mbuf_traffic_class_t
1275 mbuf_get_traffic_class(mbuf_t m
)
1277 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1278 return (MBUF_TC_BE
);
1280 return (m_get_traffic_class(m
));
1284 mbuf_set_traffic_class(mbuf_t m
, mbuf_traffic_class_t tc
)
1286 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) ||
1287 ((u_int32_t
)tc
>= MBUF_TC_MAX
))
1290 return (m_set_traffic_class(m
, tc
));
1294 mbuf_is_traffic_class_privileged(mbuf_t m
)
1296 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) ||
1297 !MBUF_VALID_SC(m
->m_pkthdr
.pkt_svc
))
1300 return ((m
->m_pkthdr
.pkt_flags
& PKTF_PRIO_PRIVILEGED
) ? 1 : 0);
1304 mbuf_get_service_class_max_count(void)
1306 return (MBUF_SC_MAX_CLASSES
);
1310 mbuf_get_service_class_index(mbuf_svc_class_t sc
, u_int32_t
*index
)
1312 if (index
== NULL
|| !MBUF_VALID_SC(sc
))
1315 *index
= MBUF_SCIDX(sc
);
1320 mbuf_get_service_class(mbuf_t m
)
1322 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1323 return (MBUF_SC_BE
);
1325 return (m_get_service_class(m
));
1329 mbuf_set_service_class(mbuf_t m
, mbuf_svc_class_t sc
)
1331 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
))
1334 return (m_set_service_class(m
, sc
));
1338 mbuf_pkthdr_aux_flags(mbuf_t m
, mbuf_pkthdr_aux_flags_t
*flagsp
)
1342 if (m
== NULL
|| !(m
->m_flags
& M_PKTHDR
) || flagsp
== NULL
)
1346 flags
= m
->m_pkthdr
.pkt_flags
;
1347 if ((flags
& (PKTF_INET_RESOLVE
|PKTF_RESOLVE_RTR
)) ==
1348 (PKTF_INET_RESOLVE
|PKTF_RESOLVE_RTR
))
1349 *flagsp
|= MBUF_PKTAUXF_INET_RESOLVE_RTR
;
1350 if ((flags
& (PKTF_INET6_RESOLVE
|PKTF_RESOLVE_RTR
)) ==
1351 (PKTF_INET6_RESOLVE
|PKTF_RESOLVE_RTR
))
1352 *flagsp
|= MBUF_PKTAUXF_INET6_RESOLVE_RTR
;
1354 /* These 2 flags are mutually exclusive */
1356 (MBUF_PKTAUXF_INET_RESOLVE_RTR
| MBUF_PKTAUXF_INET6_RESOLVE_RTR
)) !=
1357 (MBUF_PKTAUXF_INET_RESOLVE_RTR
| MBUF_PKTAUXF_INET6_RESOLVE_RTR
));
1363 mbuf_get_driver_scratch(mbuf_t m
, u_int8_t
**area
, size_t *area_len
)
1365 if (m
== NULL
|| area
== NULL
|| area_len
== NULL
||
1366 !(m
->m_flags
& M_PKTHDR
))
1369 *area_len
= m_scratch_get(m
, area
);