/*
 * Copyright (c) 2004-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
//#include <sys/kpi_interface.h>

#include <sys/param.h>
#include <sys/mbuf.h>		/* struct mbuf internals used throughout this file */
#include <sys/socket.h>
#include <kern/debug.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <netinet/in.h>

#include "net/net_str_id.h"
static const mbuf_flags_t mbuf_flags_mask = MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
    MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
    MBUF_LASTFRAG | MBUF_PROMISC;
void* mbuf_data(mbuf_t mbuf)
{
	return mbuf->m_data;
}
void* mbuf_datastart(mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_buf;
	if (mbuf->m_flags & M_PKTHDR)
		return mbuf->m_pktdat;
	return mbuf->m_dat;
}
errno_t
mbuf_setdata(mbuf_t mbuf, void* data, size_t len)
{
	size_t	start = (size_t)((char*)mbuf_datastart(mbuf));
	size_t	maxlen = mbuf_maxlen(mbuf);

	if ((size_t)data < start || ((size_t)data) + len > start + maxlen)
		return EINVAL;
	mbuf->m_data = data;
	mbuf->m_len = len;

	return 0;
}
errno_t
mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf))
		return ENOTSUP;
	mbuf->m_data = mbuf_datastart(mbuf);
	mbuf->m_data += ((mbuf_trailingspace(mbuf) - len) &~ (sizeof(u_int32_t) - 1));

	return 0;
}
addr64_t
mbuf_data_to_physical(void* ptr)
{
	return (addr64_t)(intptr_t)mcl_to_paddr(ptr);
}
errno_t
mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_get(how, type);

	return (*mbuf == NULL) ? ENOMEM : 0;
}
errno_t
mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_gethdr(how, type);

	return (*mbuf == NULL) ? ENOMEM : 0;
}
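
#if 0
/*
 * Example (editor's sketch, not part of the original file): typical use of
 * the allocation KPI from a kext.  The helper name my_make_header() is
 * hypothetical; the calls rely only on the NULL-on-failure contract noted
 * in the functions above.
 */
static errno_t
my_make_header(mbuf_t *out)
{
	mbuf_t m = NULL;
	errno_t err;

	/* Allocate an mbuf with a packet header; *out stays NULL on failure. */
	err = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &m);
	if (err != 0)
		return err;

	mbuf_setlen(m, 0);
	mbuf_pkthdr_setlen(m, 0);
	*out = m;
	return 0;
}
#endif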
errno_t
mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t),
    size_t extsize, caddr_t extarg)
{
	if (extbuf == NULL || extfree == NULL || extsize == 0)
		return (EINVAL);

	if ((*mbuf = m_clattach(mbuf != NULL ? *mbuf : NULL, type, extbuf,
	    extfree, extsize, extarg, how)) == NULL)
		return (ENOMEM);

	return (0);
}
errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL)
		return (EINVAL);

	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > NBPG && njcl == 0)
		return (ENOTSUP);

	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL)
		*size = MCLBYTES;
	else if (*size > MCLBYTES && *size <= NBPG &&
	    (*addr = m_bigalloc(how)) != NULL)
		*size = NBPG;
	else if (*size > NBPG && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL)
		*size = M16KCLBYTES;
	else
		*size = 0;

	if (*addr == NULL)
		return (ENOMEM);

	return (0);
}
void
mbuf_freecluster(caddr_t addr, size_t size)
{
	if (size != MCLBYTES && size != NBPG && size != M16KCLBYTES)
		panic("%s: invalid size (%ld) for cluster %p", __func__,
		    size, (void *)addr);

	if (size == MCLBYTES)
		m_mclfree(addr);
	else if (size == NBPG)
		m_bigfree(addr, NBPG, NULL);
	else if (njcl > 0)
		m_16kfree(addr, M16KCLBYTES, NULL);
	else
		panic("%s: freeing jumbo cluster to an empty pool", __func__);
}
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t	error = 0;
	int	created = 0;

	if (mbuf == NULL)
		return (EINVAL);
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL)
			return (ENOMEM);
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == NBPG) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
		error = ENOMEM;
out:
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return (error);
}
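
#if 0
/*
 * Example (editor's sketch, not part of the original file): attaching a
 * standard 2KB cluster to an existing packet header mbuf with
 * mbuf_getcluster().  The helper name is hypothetical.
 */
static errno_t
my_get_2k_packet(mbuf_t *out)
{
	mbuf_t m = NULL;
	errno_t err;

	err = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_DATA, &m);
	if (err != 0)
		return err;

	/* Attach an MCLBYTES cluster; the existing mbuf is reused. */
	err = mbuf_getcluster(MBUF_DONTWAIT, MBUF_TYPE_DATA, MCLBYTES, &m);
	if (err != 0) {
		/* We supplied the mbuf, so it is not freed for us on failure. */
		mbuf_freem(m);
		return err;
	}

	*out = m;
	return 0;
}
#endif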
errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t	error = 0;
	int	created = 0;

	if (mbuf == NULL) return EINVAL;
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error)
			return error;
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
		error = ENOMEM;

	return error;
}
errno_t
mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t	error = 0;

	*mbuf = m_getpacket_how(how);

	if (*mbuf == NULL) {
		if (how == MBUF_WAITOK)
			error = ENOMEM;
		else
			error = EWOULDBLOCK;
	}

	return error;
}
mbuf_t
mbuf_free(mbuf_t mbuf)
{
	return m_free(mbuf);
}

void mbuf_freem(mbuf_t mbuf)
{
	m_freem(mbuf);
}

int mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}

size_t mbuf_leadingspace(const mbuf_t mbuf)
{
	return m_leadingspace(mbuf);
}

size_t mbuf_trailingspace(const mbuf_t mbuf)
{
	return m_trailingspace(mbuf);
}
errno_t
mbuf_copym(const mbuf_t src, size_t offset, size_t len,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_copym(src, offset, len, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}
errno_t
mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_dup(src, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}
errno_t
mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
	/* Must set *orig to NULL in failure case */
	*orig = m_prepend_2(*orig, len, how);

	return (*orig == NULL) ? ENOMEM : 0;
}
errno_t
mbuf_split(mbuf_t src, size_t offset,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_split(src, offset, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}
errno_t
mbuf_pullup(mbuf_t *mbuf, size_t len)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_pullup(*mbuf, len);

	return (*mbuf == NULL) ? ENOMEM : 0;
}
errno_t
mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;

	*location = m_pulldown(src, *offset, len, &new_offset);
	*offset = new_offset;

	return (*location == NULL) ? ENOMEM : 0;
}
void mbuf_adj(mbuf_t mbuf, int len)
{
	m_adj(mbuf, len);
}

errno_t
mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		int used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
		    m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m))
			return EINVAL;
	} else if (-amount > m->m_len) {
		return EINVAL;
	}

	m->m_len += amount;
	return 0;
}
mbuf_t
mbuf_concatenate(mbuf_t dst, mbuf_t src)
{
	if (dst == NULL)
		return NULL;

	m_cat(dst, src);

	/* return dst as is in the current implementation */
	return dst;
}
errno_t
mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void* out_data)
{
	/* Copied m_copydata, added error handling (don't just panic) */
	int count;
	mbuf_t m = m0;

	while (off > 0) {
		if (m == 0)
			return EINVAL;
		if (off < (size_t)m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			return EINVAL;
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		len -= count;
		out_data = ((char*)out_data) + count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}
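
#if 0
/*
 * Example (editor's sketch, not part of the original file): pulling a
 * fixed-size header out of a chain with mbuf_copydata(), which copies
 * across mbuf boundaries.  The struct and helper names are hypothetical.
 */
struct my_proto_hdr {
	u_int16_t	type;
	u_int16_t	length;
};

static errno_t
my_read_header(mbuf_t m, size_t offset, struct my_proto_hdr *hdr)
{
	/* Returns EINVAL if the chain is shorter than offset + sizeof(*hdr). */
	return mbuf_copydata(m, offset, sizeof(*hdr), hdr);
}
#endif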
int mbuf_mclhasreference(mbuf_t mbuf)
{
	if ((mbuf->m_flags & M_EXT))
		return m_mclhasreference(mbuf);
	else
		return 0;
}
mbuf_t
mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}

errno_t
mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
	if (next && ((next)->m_nextpkt != NULL ||
	    (next)->m_type == MT_FREE)) return EINVAL;
	mbuf->m_next = next;

	return 0;
}
mbuf_t
mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}

void mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}

size_t mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}

void mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = len;
}

size_t mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_size;
	return &mbuf->m_dat[MLEN] - ((char*)mbuf_datastart(mbuf));
}
mbuf_type_t
mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}

errno_t
mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) return EINVAL;

	m_mchtype(mbuf, new_type);

	return 0;
}
mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}

errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	if ((flags & ~mbuf_flags_mask) != 0) return EINVAL;
	mbuf->m_flags = flags |
	    (mbuf->m_flags & ~mbuf_flags_mask);

	return 0;
}

errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	if (((flags | mask) & ~mbuf_flags_mask) != 0) return EINVAL;

	mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);

	return 0;
}
errno_t
mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
{
	if (((src)->m_flags & M_PKTHDR) == 0)
		return EINVAL;

	m_copy_pkthdr(dest, src);

	return 0;
}
size_t mbuf_pkthdr_len(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.len;
}

void mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_pkthdr.len = len;
}

void mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}
ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	// If we reference count ifnets, we should take a reference here before returning
	return mbuf->m_pkthdr.rcvif;
}

errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet*)ifnet;
	return 0;
}

void* mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.header;
}

void mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.header = (void*)header;
}
void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
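
#if 0
/*
 * Example (editor's sketch, not part of the original file): an interface
 * filter that rewrites bytes of an inbound packet should call
 * mbuf_inbound_modified() so stale hardware checksum results are not
 * trusted downstream.  The filter name and the rewrite itself are
 * hypothetical; the callback shape follows the iff_input_func type from
 * <net/kpi_interface.h>.
 */
static errno_t
my_iff_input(__unused void *cookie, __unused ifnet_t ifp,
    __unused protocol_family_t protocol, mbuf_t *data,
    __unused char **frame_ptr)
{
	/* ... rewrite part of the payload in *data ... */
	mbuf_inbound_modified(*data);
	return 0;
}
#endif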
extern void in_cksum_offset(struct mbuf* m, size_t ip_offset);
extern void in_delayed_cksum_offset(struct mbuf *m, int ip_offset);
void
mbuf_outbound_finalize(mbuf_t mbuf, u_int32_t protocol_family, size_t protocol_offset)
{
	if ((mbuf->m_pkthdr.csum_flags &
	    (CSUM_DELAY_DATA | CSUM_DELAY_IP | CSUM_TCP_SUM16)) == 0)
		return;

	/* Generate the packet in software, client needs it */
	switch (protocol_family) {
	case PF_INET:
		if (mbuf->m_pkthdr.csum_flags & CSUM_TCP_SUM16) {
			/*
			 * If you're wondering where this lovely code comes
			 * from, we're trying to undo what happens in ip_output.
			 * Look for CSUM_TCP_SUM16 in ip_output.
			 */
			u_int16_t first, second;
			mbuf->m_pkthdr.csum_flags &= ~CSUM_TCP_SUM16;
			mbuf->m_pkthdr.csum_flags |= CSUM_TCP;
			first = mbuf->m_pkthdr.csum_data >> 16;
			second = mbuf->m_pkthdr.csum_data & 0xffff;
			mbuf->m_pkthdr.csum_data = first - second;
		}
		if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
			in_delayed_cksum_offset(mbuf, protocol_offset);
		}

		if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
			in_cksum_offset(mbuf, protocol_offset);
		}

		mbuf->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DELAY_IP);
		break;

	default:
		/*
		 * Not sure what to do here if anything.
		 * Hardware checksum code looked pretty IPv4 specific.
		 */
		if ((mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_IP)) != 0)
			panic("mbuf_outbound_finalize - CSUM flags set for non-IPv4 packet (%u)!\n", protocol_family);
		break;
	}
}
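
#if 0
/*
 * Example (editor's sketch, not part of the original file): a driver whose
 * hardware cannot offload checksums asks the stack to finish them in
 * software before handing the frame to the hardware.  The function name is
 * hypothetical; 14 is the usual Ethernet header length, i.e. the offset of
 * the IP header in the frame.
 */
static void
my_driver_start(mbuf_t packet)
{
	/* Compute any pending IP/TCP/UDP checksums for an IPv4 frame. */
	mbuf_outbound_finalize(packet, PF_INET, 14 /* Ethernet header length */);

	/* ... enqueue packet for transmit ... */
}
#endif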
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}

errno_t
mbuf_get_vlan_tag(
	mbuf_t mbuf,
	u_int16_t *vlan)
{
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0)
		return ENXIO; // No vlan tag set

	*vlan = mbuf->m_pkthdr.vlan_tag;

	return 0;
}

errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
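
#if 0
/*
 * Example (editor's sketch, not part of the original file): a driver that
 * inserts 802.1Q tags in hardware reads the tag requested by the stack;
 * ENXIO simply means the frame is untagged.  The function name is
 * hypothetical.
 */
static void
my_driver_apply_vlan(mbuf_t packet)
{
	u_int16_t tag;

	if (mbuf_get_vlan_tag(packet, &tag) == 0) {
		/* ... program the hardware to insert `tag` on transmit ... */
	}
}
#endif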
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
	MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP | MBUF_CSUM_REQ_SUM16;

errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
	MBUF_TSO_IPV4 | MBUF_TSO_IPV6;

errno_t
mbuf_get_tso_requested(
	mbuf_t mbuf,
	mbuf_tso_request_flags_t *request,
	u_int32_t *value)
{
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    request == NULL || value == NULL)
		return EINVAL;

	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_tso_request_flags;
	if (*request && value != NULL)
		*value = mbuf->m_pkthdr.tso_segsz;

	return 0;
}
errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}

errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
	MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
	MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_TCP_SUM16;

errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed = mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}

errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
errno_t
mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length))
		return EINVAL;

	*csum = inet_cksum(mbuf, protocol, offset, length);
	return 0;
}
#if INET6
errno_t
mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length))
		return EINVAL;

	*csum = inet6_cksum(mbuf, protocol, offset, length);
	return 0;
}
#else /* INET6 */
errno_t
mbuf_inet6_cksum(__unused mbuf_t mbuf, __unused int protocol,
    __unused u_int32_t offset, __unused u_int32_t length,
    __unused u_int16_t *csum)
{
	panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
	return 0;
}

u_int16_t
inet6_cksum(__unused struct mbuf *m, __unused unsigned int nxt,
    __unused unsigned int off, __unused unsigned int len)
{
	panic("inet6_cksum() doesn't exist on this platform\n");
	return 0;
}

void nd6_lookup_ipv6(void);
void
nd6_lookup_ipv6(void)
{
	panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
}

int
in6addr_local(__unused struct in6_addr *a)
{
	panic("in6addr_local() doesn't exist on this platform\n");
	return 0;
}

void nd6_storelladdr(void);
void
nd6_storelladdr(void)
{
	panic("nd6_storelladdr() doesn't exist on this platform\n");
}
#endif /* INET6 */
#define MTAG_FIRST_ID FIRST_KPI_STR_ID

errno_t
mbuf_tag_id_find(
	const char	*string,
	mbuf_tag_id_t	*out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
errno_t
mbuf_tag_allocate(
	mbuf_t		mbuf,
	mbuf_tag_id_t	id,
	mbuf_tag_type_t	type,
	size_t		length,
	mbuf_how_t	how,
	void**		data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (data_p != NULL)
		*data_p = NULL;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first ||
	    id > mtag_id_last || length < 1 || (length & 0xffff0000) != 0 ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag != NULL) {
		return EEXIST;
	}

	/* Allocate an mtag */
	tag = m_tag_alloc(id, type, length, how);
	if (tag == NULL) {
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
	}

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
	*data_p = tag + 1;

	return 0;
}
errno_t
mbuf_tag_find(
	mbuf_t		mbuf,
	mbuf_tag_id_t	id,
	mbuf_tag_type_t	type,
	size_t*		length,
	void**		data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (length != NULL)
		*length = 0;
	if (data_p != NULL)
		*data_p = NULL;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first ||
	    id > mtag_id_last || length == NULL || data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}
void
mbuf_tag_free(
	mbuf_t		mbuf,
	mbuf_tag_id_t	id,
	mbuf_tag_type_t	type)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first ||
	    id > mtag_id_last)
		return;

	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return;
	}

	m_tag_delete(mbuf, tag);
}
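
#if 0
/*
 * Example (editor's sketch, not part of the original file): typical mbuf
 * tag usage from a network kext.  The bundle-style string, the tag type
 * value (1), and the struct are hypothetical; the calls are the tag KPI
 * defined above.
 */
struct my_tag_data {
	u_int32_t	flow_id;
};

static errno_t
my_tag_packet(mbuf_t m, u_int32_t flow_id)
{
	mbuf_tag_id_t		id;
	struct my_tag_data	*data;
	errno_t			err;

	err = mbuf_tag_id_find("com.example.myfilter", &id);
	if (err != 0)
		return err;

	err = mbuf_tag_allocate(m, id, 1 /* tag type */, sizeof(*data),
	    MBUF_WAITOK, (void **)&data);
	if (err != 0)
		return err;

	data->flow_id = flow_id;
	return 0;
}
#endif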
void mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}
errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks)
			error = ENOBUFS;
		else
			error = ENOMEM;
	} else {
		if (maxchunks)
			*maxchunks = numchunks;
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}

errno_t
mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (numpkts == 0) {
		error = EINVAL;
		goto out;
	}
	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks)
			error = ENOBUFS;
		else
			error = ENOMEM;
	} else {
		if (maxchunks)
			*maxchunks = numchunks;
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 */
errno_t
mbuf_copyback(
	mbuf_t		m,
	size_t		off,
	size_t		len,
	const void	*data,
	mbuf_how_t	how)
{
	size_t		mlen;
	mbuf_t		m_start = m;
	mbuf_t		n;
	int		totlen = 0;
	errno_t		result = 0;
	const char	*cp = data;

	if (m == NULL || len == 0 || data == NULL)
		return EINVAL;

	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL && mbuf_trailingspace(m) > 0) {
			size_t	grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char*)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/* cluster allocation failure is okay, we can grow chain */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen))
		m_start->m_pkthdr.len = totlen;

	return result;
}
u_int32_t
mbuf_get_mhlen(void)
{
	return _MHLEN;
}
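
#if 0
/*
 * Example (editor's sketch, not part of the original file): building a
 * packet chain for `len` bytes with mbuf_allocpacket() and filling it with
 * mbuf_copyback(), both defined above.  The buffer and helper name are
 * hypothetical caller-supplied values.
 */
static errno_t
my_build_packet(const void *buf, size_t len, mbuf_t *out)
{
	mbuf_t m = NULL;
	errno_t err;

	/* Allocate a chain (clusters as needed) big enough for len bytes. */
	err = mbuf_allocpacket(MBUF_WAITOK, len, NULL, &m);
	if (err != 0)
		return err;

	/* Copy the payload in; unlike m_copyback this reports failure. */
	err = mbuf_copyback(m, 0, len, buf, MBUF_WAITOK);
	if (err != 0) {
		mbuf_freem(m);
		return err;
	}

	*out = m;
	return 0;
}
#endif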