/*
 * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

//#include <sys/kpi_interface.h>

#include <sys/param.h>
#include <sys/mcache.h>
#include <sys/socket.h>
#include <kern/debug.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include "net/net_str_id.h"
/* mbuf flags visible to KPI clients; do not add private flags here */
static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
    MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
    MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);

/* Unalterable mbuf flags */
static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);
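/*
 * Illustrative note (not part of the original source): mbuf_flags() below
 * returns only the bits in mbuf_flags_mask, so a KPI client can test the
 * public flags without ever seeing kernel-private ones, e.g.:
 *
 *	if (mbuf_flags(m) & MBUF_PKTHDR)
 *		; // m is the first mbuf of a packet and carries a packet header
 *
 * mbuf_cflags_mask lists the flags (currently just MBUF_EXT) that the
 * flag-setting KPIs refuse to alter.
 */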
void* mbuf_data(mbuf_t mbuf)
{
	return mbuf->m_data;
}

void* mbuf_datastart(mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_buf;
	if (mbuf->m_flags & M_PKTHDR)
		return mbuf->m_pktdat;
	return mbuf->m_dat;
}

errno_t mbuf_setdata(mbuf_t mbuf, void* data, size_t len)
{
	size_t	start = (size_t)((char*)mbuf_datastart(mbuf));
	size_t	maxlen = mbuf_maxlen(mbuf);

	if ((size_t)data < start || ((size_t)data) + len > start + maxlen)
		return EINVAL;
	mbuf->m_data = data;
	mbuf->m_len = len;

	return 0;
}
errno_t mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf))
		return ENOTSUP;
	mbuf->m_data = mbuf_datastart(mbuf);
	mbuf->m_data += ((mbuf_trailingspace(mbuf) - len) &~ (sizeof(u_int32_t) - 1));

	return 0;
}
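/*
 * Usage sketch (illustrative only, not in the original file): mbuf_align_32()
 * is normally called on a freshly allocated mbuf, before data is copied in,
 * so that 'payload_len' bytes end up 32-bit aligned at the end of the buffer.
 * payload_len is a placeholder here:
 *
 *	mbuf_t m = NULL;
 *	if (mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &m) == 0 &&
 *	    mbuf_align_32(m, payload_len) == 0) {
 *		; // mbuf_data(m) now points at an aligned region for payload_len bytes
 *	}
 */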
/* This function is used to provide mcl_to_paddr via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
addr64_t mbuf_data_to_physical(void* ptr)
{
	return ((addr64_t)mcl_to_paddr(ptr));
}
errno_t mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_get(how, type);

	return (*mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_gethdr(how, type);

	return (*mbuf == NULL) ? ENOMEM : 0;
}
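/*
 * Usage sketch (illustrative, not part of the original source): both
 * mbuf_get() and mbuf_gethdr() hand back a single mbuf and translate a NULL
 * allocator result into ENOMEM, so callers only need to check the error code:
 *
 *	mbuf_t m = NULL;
 *	errno_t err = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_DATA, &m);
 *	if (err != 0)
 *		return err;	// *mbuf was set to NULL on failure, per the comment above
 */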
errno_t
mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t),
    size_t extsize, caddr_t extarg)
{
	if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0)
		return (EINVAL);

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, extsize, extarg, how)) == NULL)
		return (ENOMEM);

	return (0);
}

errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL)
		return (EINVAL);

	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > MBIGCLBYTES && njcl == 0)
		return (ENOTSUP);

	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL)
		*size = MCLBYTES;
	else if (*size > MCLBYTES && *size <= MBIGCLBYTES &&
	    (*addr = m_bigalloc(how)) != NULL)
		*size = MBIGCLBYTES;
	else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL)
		*size = M16KCLBYTES;
	else
		*size = 0;

	if (*addr == NULL)
		return (ENOMEM);

	return (0);
}
void
mbuf_freecluster(caddr_t addr, size_t size)
{
	if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES)
		panic("%s: invalid size (%ld) for cluster %p", __func__,
		    size, (void *)addr);

	if (size == MCLBYTES)
		m_mclfree(addr);
	else if (size == MBIGCLBYTES)
		m_bigfree(addr, MBIGCLBYTES, NULL);
	else if (njcl > 0)
		m_16kfree(addr, M16KCLBYTES, NULL);
	else
		panic("%s: freeing jumbo cluster to an empty pool", __func__);
}

errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t	error = 0;
	int	created = 0;

	if (mbuf == NULL)
		return EINVAL;
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL)
			return ENOMEM;
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == MBIGCLBYTES) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
		error = ENOMEM;
out:
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}
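/*
 * Illustrative sketch (not in the original source): a caller that wants an
 * mbuf backed by a larger external cluster passes one of the three fixed
 * cluster sizes; other sizes fail:
 *
 *	mbuf_t m = NULL;
 *	errno_t err = mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, MBIGCLBYTES, &m);
 *	if (err == 0)
 *		; // m now has an external MBIGCLBYTES cluster (M_EXT is set)
 */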
errno_t mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t	error = 0;
	int	created = 0;
	if (mbuf == NULL) return EINVAL;
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error)
			return error;
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
		error = ENOMEM;
	return error;
}

errno_t mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t	error = 0;

	*mbuf = m_getpacket_how(how);

	if (*mbuf == NULL) {
		if (how == MBUF_WAITOK)
			error = ENOMEM;
		else
			error = EWOULDBLOCK;
	}

	return error;
}
/* This function is used to provide m_free via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
mbuf_t mbuf_free(mbuf_t mbuf)
{
	return m_free(mbuf);
}

/* This function is used to provide m_freem via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void mbuf_freem(mbuf_t mbuf)
{
	m_freem(mbuf);
}

int mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}
size_t mbuf_leadingspace(const mbuf_t mbuf)
{
	return m_leadingspace(mbuf);
}

/* This function is used to provide m_trailingspace via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
size_t mbuf_trailingspace(const mbuf_t mbuf)
{
	return m_trailingspace(mbuf);
}

errno_t mbuf_copym(const mbuf_t src, size_t offset, size_t len,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*new_mbuf = m_copym(src, offset, len, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_dup(src, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
	/* Must set *orig to NULL in failure case */
	*orig = m_prepend_2(*orig, len, how);

	return (*orig == NULL) ? ENOMEM : 0;
}

errno_t mbuf_split(mbuf_t src, size_t offset,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_split(src, offset, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_pullup(mbuf_t *mbuf, size_t len)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_pullup(*mbuf, len);

	return (*mbuf == NULL) ? ENOMEM : 0;
}
errno_t mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;

	*location = m_pulldown(src, *offset, len, &new_offset);
	*offset = new_offset;

	return (*location == NULL) ? ENOMEM : 0;
}
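/*
 * Usage sketch (illustrative only): unlike mbuf_pullup(), mbuf_pulldown()
 * makes 'len' bytes contiguous at an arbitrary offset and reports where those
 * bytes ended up.  struct my_hdr and hdr_off are placeholders:
 *
 *	size_t off = hdr_off;
 *	mbuf_t hdr_mbuf = NULL;
 *	if (mbuf_pulldown(m, &off, sizeof (struct my_hdr), &hdr_mbuf) == 0) {
 *		struct my_hdr *hp = (struct my_hdr *)
 *		    ((char *)mbuf_data(hdr_mbuf) + off);
 *		// hp points at a contiguous header inside hdr_mbuf
 *	}
 */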
/* This function is used to provide m_adj via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void mbuf_adj(mbuf_t mbuf, int len)
{
	m_adj(mbuf, len);
}

errno_t mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		int used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
		    m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m))
			return (EINVAL);
	} else if (-amount > m->m_len) {
		return (EINVAL);
	}

	m->m_len += amount;
	return (0);
}

mbuf_t
mbuf_concatenate(mbuf_t dst, mbuf_t src)
{
	if (dst == NULL)
		return (NULL);

	m_cat(dst, src);

	/* return dst as is in the current implementation */
	return (dst);
}
errno_t mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void* out_data)
{
	/* Copied m_copydata, added error handling (don't just panic) */
	size_t	count;
	mbuf_t	m = m0;

	while (off > 0) {
		if (m == 0)
			return EINVAL;
		if (off < (size_t)m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			return EINVAL;
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		len -= count;
		out_data = ((char*)out_data) + count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}

int mbuf_mclhasreference(mbuf_t mbuf)
{
	if ((mbuf->m_flags & M_EXT))
		return m_mclhasreference(mbuf);
	else
		return 0;
}
mbuf_t mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}

errno_t mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
	if (next && ((next)->m_nextpkt != NULL ||
	    (next)->m_type == MT_FREE)) return EINVAL;
	mbuf->m_next = next;

	return 0;
}

mbuf_t mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}

void mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}

size_t mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}

void mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = len;
}
size_t mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_size;
	return &mbuf->m_dat[MLEN] - ((char*)mbuf_datastart(mbuf));
}

mbuf_type_t mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}

errno_t mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) return EINVAL;

	m_mchtype(mbuf, new_type);

	return 0;
}

mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return (mbuf->m_flags & mbuf_flags_mask);
}
errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	errno_t ret = 0;

	if ((flags | (mbuf->m_flags & mbuf_flags_mask)) &
	    (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = flags | (mbuf->m_flags & ~mbuf_flags_mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;	/* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return (ret);
}

errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	errno_t ret = 0;

	if ((flags | mask) & (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;	/* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return (ret);
}
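/*
 * Illustrative example (not part of the original file): mbuf_setflags_mask()
 * updates only the bits named in 'mask' and leaves the rest of m_flags alone,
 * e.g. marking a frame multicast while clearing broadcast:
 *
 *	errno_t err = mbuf_setflags_mask(m, MBUF_MCAST, MBUF_BCAST | MBUF_MCAST);
 *
 * As with mbuf_setflags(), any change to MBUF_PKTHDR is routed through
 * m_reinit() so the packet header is attached or torn down consistently.
 */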
errno_t mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
{
	if (((src)->m_flags & M_PKTHDR) == 0)
		return EINVAL;

	m_copy_pkthdr(dest, src);

	return 0;
}

size_t mbuf_pkthdr_len(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.len;
}

void mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_pkthdr.len = len;
}

void mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}

ifnet_t mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	// If we reference count ifnets, we should take a reference here before returning
	return mbuf->m_pkthdr.rcvif;
}

errno_t mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet;
	return 0;
}

void* mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.pkt_hdr;
}

void mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.pkt_hdr = (void*)header;
}

void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
void
mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
	/* Generate the packet in software, client needs it */
	switch (pf) {
	case PF_INET:
		(void) in_finalize_cksum(m, o, m->m_pkthdr.csum_flags);
		break;

	case PF_INET6:
#if INET6
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; indicate that the callee
		 * should skip such case by setting optlen to -1.
		 */
		(void) in6_finalize_cksum(m, o, -1, -1, m->m_pkthdr.csum_flags);
#endif /* INET6 */
		break;

	default:
		break;
	}
}
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}

errno_t
mbuf_get_vlan_tag(
	mbuf_t mbuf,
	u_int16_t *vlan)
{
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0)
		return ENXIO; // No vlan tag set

	*vlan = mbuf->m_pkthdr.vlan_tag;

	return 0;
}

errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
    MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
    MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;
errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
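/*
 * Illustrative sketch (not in the original source): the stack asks for
 * checksum offload with mbuf_set_csum_requested(); a driver's output path
 * would normally use the matching getter and only do the work its hardware
 * supports:
 *
 *	mbuf_csum_request_flags_t req;
 *	u_int32_t val;
 *	if (mbuf_get_csum_requested(m, &req, &val) == 0 &&
 *	    (req & MBUF_CSUM_REQ_TCP))
 *		; // program the NIC to insert the TCP checksum
 */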
static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
    MBUF_TSO_IPV4 | MBUF_TSO_IPV6;
errno_t
mbuf_get_tso_requested(
	mbuf_t mbuf,
	mbuf_tso_request_flags_t *request,
	u_int32_t *value)
{
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    request == NULL || value == NULL)
		return EINVAL;

	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_tso_request_flags;
	if (*request && value != NULL)
		*value = mbuf->m_pkthdr.tso_segsz;

	return 0;
}

errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}

errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}

static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
    MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;

errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}

errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed =
	    mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}

errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
errno_t
mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length))
		return (EINVAL);

	*csum = inet_cksum(mbuf, protocol, offset, length);
	return (0);
}

#if INET6
errno_t
mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length))
		return (EINVAL);

	*csum = inet6_cksum(mbuf, protocol, offset, length);
	return (0);
}
#else /* INET6 */
errno_t
mbuf_inet6_cksum(__unused mbuf_t mbuf, __unused int protocol,
    __unused u_int32_t offset, __unused u_int32_t length,
    __unused u_int16_t *csum)
{
	panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
	return (0);
}

u_int16_t
inet6_cksum(__unused struct mbuf *m, __unused unsigned int nxt,
    __unused unsigned int off, __unused unsigned int len)
{
	panic("inet6_cksum() doesn't exist on this platform\n");
	return (0);
}

void nd6_lookup_ipv6(void);
void
nd6_lookup_ipv6(void)
{
	panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
}

int
in6addr_local(__unused struct in6_addr *a)
{
	panic("in6addr_local() doesn't exist on this platform\n");
	return (0);
}

void nd6_storelladdr(void);
void
nd6_storelladdr(void)
{
	panic("nd6_storelladdr() doesn't exist on this platform\n");
}
#endif /* INET6 */
#define MTAG_FIRST_ID FIRST_KPI_STR_ID

errno_t
mbuf_tag_id_find(
	const char		*string,
	mbuf_tag_id_t		*out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
errno_t
mbuf_tag_allocate(
	mbuf_t			mbuf,
	mbuf_tag_id_t		id,
	mbuf_tag_type_t		type,
	size_t			length,
	mbuf_how_t		how,
	void			**data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (data_p != NULL)
		*data_p = NULL;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first ||
	    id > mtag_id_last || length < 1 || (length & 0xffff0000) != 0 ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag != NULL) {
		return EEXIST;
	}

	/* Allocate an mtag */
	tag = m_tag_create(id, type, length, how, mbuf);
	if (tag == NULL) {
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
	}

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
	*data_p = tag + 1;

	return 0;
}

errno_t
mbuf_tag_find(
	mbuf_t			mbuf,
	mbuf_tag_id_t		id,
	mbuf_tag_type_t		type,
	size_t			*length,
	void			**data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first ||
	    id > mtag_id_last || length == NULL || data_p == NULL) {
		return EINVAL;
	}

	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}

void
mbuf_tag_free(
	mbuf_t			mbuf,
	mbuf_tag_id_t		id,
	mbuf_tag_type_t		type)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first ||
	    id > mtag_id_last)
		return;

	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return;
	}

	m_tag_delete(mbuf, tag);
}
/*
 * Maximum length of driver auxiliary data; keep this small to
 * fit in a single mbuf to avoid wasting memory, rounded down to
 * the nearest 64-bit boundary.  This takes into account mbuf
 * tag-related (m_taghdr + m_tag) as well as m_drvaux_tag structs.
 */
#define	MBUF_DRVAUX_MAXLEN						\
	P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) -			\
	M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))

errno_t
mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
    u_int32_t subfamily, size_t length, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
	    length == 0 || length > MBUF_DRVAUX_MAXLEN)
		return (EINVAL);

	if (data_p != NULL)
		*data_p = NULL;

	/* Check if one is already associated */
	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL)
		return (EEXIST);

	/* Tag is (m_drvaux_tag + module specific data) */
	if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
	    sizeof (*p) + length, how, mbuf)) == NULL)
		return ((how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK);

	p = (struct m_drvaux_tag *)(tag + 1);
	p->da_family = family;
	p->da_subfamily = subfamily;
	p->da_length = length;

	/* Associate the tag */
	m_tag_prepend(mbuf, tag);

	if (data_p != NULL)
		*data_p = (p + 1);

	return (0);
}

errno_t
mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
    u_int32_t *length_p, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL)
		return (EINVAL);

	*data_p = NULL;

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL)
		return (ENOENT);

	/* Must be at least size of m_drvaux_tag */
	VERIFY(tag->m_tag_len >= sizeof (*p));

	p = (struct m_drvaux_tag *)(tag + 1);
	VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);

	if (family_p != NULL)
		*family_p = p->da_family;
	if (subfamily_p != NULL)
		*subfamily_p = p->da_subfamily;
	if (length_p != NULL)
		*length_p = p->da_length;

	*data_p = (p + 1);

	return (0);
}

void
mbuf_del_drvaux(mbuf_t mbuf)
{
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR))
		return;

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL)
		m_tag_delete(mbuf, tag);
}
void mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}
errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks,
    mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks)
			error = ENOBUFS;
		else
			error = ENOMEM;
	} else {
		if (maxchunks)
			*maxchunks = numchunks;
		error = 0;
		*mbuf = m;
	}
out:
	return (error);
}

errno_t
mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen,
    unsigned int *maxchunks, mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (numpkts == 0) {
		error = EINVAL;
		goto out;
	}
	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks)
			error = ENOBUFS;
		else
			error = ENOMEM;
	} else {
		if (maxchunks)
			*maxchunks = numchunks;
		error = 0;
		*mbuf = m;
	}
out:
	return (error);
}
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 */
errno_t
mbuf_copyback(
	mbuf_t		m,
	size_t		off,
	size_t		len,
	const void	*data,
	mbuf_how_t	how)
{
	size_t	mlen;
	mbuf_t	m_start = m;
	mbuf_t	n;
	int	totlen = 0;
	errno_t	result = 0;
	const char *cp = data;

	if (m == NULL || len == 0 || data == NULL)
		return (EINVAL);

	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL && mbuf_trailingspace(m) > 0) {
			size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char*)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/* cluster allocation failure is okay, we can grow chain */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen))
		m_start->m_pkthdr.len = totlen;
out:
	return (result);
}
u_int32_t
mbuf_get_mhlen(void)
{
	return (MHLEN);
}

u_int32_t
mbuf_get_minclsize(void)
{
	return (MHLEN + MLEN);
}

u_int32_t
mbuf_get_traffic_class_max_count(void)
{
	return (MBUF_TC_MAX);
}
errno_t
mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
{
	if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX)
		return (EINVAL);

	*index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
	return (0);
}

mbuf_traffic_class_t
mbuf_get_traffic_class(mbuf_t m)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR))
		return (MBUF_TC_BE);

	return (m_get_traffic_class(m));
}
errno_t
mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
	    ((u_int32_t)tc >= MBUF_TC_MAX))
		return (EINVAL);

	return (m_set_traffic_class(m, tc));
}

int
mbuf_is_traffic_class_privileged(mbuf_t m)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
	    !MBUF_VALID_SC(m->m_pkthdr.pkt_svc))
		return (0);

	return ((m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0);
}
u_int32_t
mbuf_get_service_class_max_count(void)
{
	return (MBUF_SC_MAX_CLASSES);
}

errno_t
mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
{
	if (index == NULL || !MBUF_VALID_SC(sc))
		return (EINVAL);

	*index = MBUF_SCIDX(sc);
	return (0);
}

mbuf_svc_class_t
mbuf_get_service_class(mbuf_t m)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR))
		return (MBUF_SC_BE);

	return (m_get_service_class(m));
}
errno_t
mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR))
		return (EINVAL);

	return (m_set_service_class(m, sc));
}
errno_t
mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
{
	u_int32_t flags;

	if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL)
		return (EINVAL);

	*flagsp = 0;
	flags = m->m_pkthdr.pkt_flags;
	if ((flags & (PKTF_INET_RESOLVE|PKTF_RESOLVE_RTR)) ==
	    (PKTF_INET_RESOLVE|PKTF_RESOLVE_RTR))
		*flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
	if ((flags & (PKTF_INET6_RESOLVE|PKTF_RESOLVE_RTR)) ==
	    (PKTF_INET6_RESOLVE|PKTF_RESOLVE_RTR))
		*flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;

	/* These 2 flags are mutually exclusive */
	VERIFY((*flagsp &
	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));

	return (0);
}

errno_t
mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
{
	if (m == NULL || area == NULL || area_len == NULL ||
	    !(m->m_flags & M_PKTHDR))
		return (EINVAL);

	*area_len = m_scratch_get(m, area);
	return (0);
}