2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
24 //#include <sys/kpi_interface.h>
26 #include <sys/param.h>
28 #include <sys/socket.h>
29 #include <kern/debug.h>
30 #include <libkern/OSAtomic.h>
31 #include <kern/kalloc.h>
34 void mbuf_tag_id_first_last(u_long
*first
, u_long
*last
);
35 errno_t
mbuf_tag_id_find_internal(const char *string
, u_long
*out_id
, int create
);
37 static const mbuf_flags_t mbuf_flags_mask
= MBUF_EXT
| MBUF_PKTHDR
| MBUF_EOR
|
38 MBUF_BCAST
| MBUF_MCAST
| MBUF_FRAG
| MBUF_FIRSTFRAG
|
39 MBUF_LASTFRAG
| MBUF_PROMISC
;
41 void* mbuf_data(mbuf_t mbuf
)
46 void* mbuf_datastart(mbuf_t mbuf
)
48 if (mbuf
->m_flags
& M_EXT
)
49 return mbuf
->m_ext
.ext_buf
;
50 if (mbuf
->m_flags
& M_PKTHDR
)
51 return mbuf
->m_pktdat
;
55 errno_t
mbuf_setdata(mbuf_t mbuf
, void* data
, size_t len
)
57 size_t start
= (size_t)((char*)mbuf_datastart(mbuf
));
58 size_t maxlen
= mbuf_maxlen(mbuf
);
60 if ((size_t)data
< start
|| ((size_t)data
) + len
> start
+ maxlen
)
68 errno_t
mbuf_align_32(mbuf_t mbuf
, size_t len
)
70 if ((mbuf
->m_flags
& M_EXT
) != 0 && m_mclhasreference(mbuf
))
72 mbuf
->m_data
= mbuf_datastart(mbuf
);
73 mbuf
->m_data
+= ((mbuf_trailingspace(mbuf
) - len
) &~ (sizeof(u_int32_t
) - 1));
78 addr64_t
mbuf_data_to_physical(void* ptr
)
80 return (addr64_t
)mcl_to_paddr(ptr
);
83 errno_t
mbuf_get(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
85 /* Must set *mbuf to NULL in failure case */
86 *mbuf
= m_get(how
, type
);
88 return (*mbuf
== NULL
) ? ENOMEM
: 0;
91 errno_t
mbuf_gethdr(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
93 /* Must set *mbuf to NULL in failure case */
94 *mbuf
= m_gethdr(how
, type
);
96 return (*mbuf
== NULL
) ? ENOMEM
: 0;
99 extern struct mbuf
* m_mbigget(struct mbuf
*m
, int nowait
);
101 errno_t
mbuf_getcluster(mbuf_how_t how
, mbuf_type_t type
, size_t size
, mbuf_t
* mbuf
)
103 /* Must set *mbuf to NULL in failure case */
110 *mbuf
= m_get(how
, type
);
116 * At the time this code was written, m_mclget and m_mbigget would always
117 * return the same value that was passed in to it.
119 if (size
== MCLBYTES
) {
120 *mbuf
= m_mclget(*mbuf
, how
);
121 } else if (size
== NBPG
) {
122 *mbuf
= m_mbigget(*mbuf
, how
);
127 if (*mbuf
== NULL
|| ((*mbuf
)->m_flags
& M_EXT
) == 0)
130 if (created
&& error
!= 0) {
138 errno_t
mbuf_mclget(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
140 /* Must set *mbuf to NULL in failure case */
143 if (mbuf
== NULL
) return EINVAL
;
145 error
= mbuf_get(how
, type
, mbuf
);
152 * At the time this code was written, m_mclget would always
153 * return the same value that was passed in to it.
155 *mbuf
= m_mclget(*mbuf
, how
);
157 if (created
&& ((*mbuf
)->m_flags
& M_EXT
) == 0) {
161 if (*mbuf
== NULL
|| ((*mbuf
)->m_flags
& M_EXT
) == 0)
167 errno_t
mbuf_getpacket(mbuf_how_t how
, mbuf_t
*mbuf
)
169 /* Must set *mbuf to NULL in failure case */
172 *mbuf
= m_getpacket_how(how
);
175 if (how
== MBUF_WAITOK
)
184 mbuf_t
mbuf_free(mbuf_t mbuf
)
189 void mbuf_freem(mbuf_t mbuf
)
194 int mbuf_freem_list(mbuf_t mbuf
)
196 return m_freem_list(mbuf
);
199 size_t mbuf_leadingspace(mbuf_t mbuf
)
201 return m_leadingspace(mbuf
);
204 size_t mbuf_trailingspace(mbuf_t mbuf
)
206 return m_trailingspace(mbuf
);
210 errno_t
mbuf_copym(mbuf_t src
, size_t offset
, size_t len
,
211 mbuf_how_t how
, mbuf_t
*new_mbuf
)
213 /* Must set *mbuf to NULL in failure case */
214 *new_mbuf
= m_copym(src
, offset
, len
, how
);
216 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
219 errno_t
mbuf_dup(mbuf_t src
, mbuf_how_t how
, mbuf_t
*new_mbuf
)
221 /* Must set *new_mbuf to NULL in failure case */
222 *new_mbuf
= m_dup(src
, how
);
224 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
227 errno_t
mbuf_prepend(mbuf_t
*orig
, size_t len
, mbuf_how_t how
)
229 /* Must set *orig to NULL in failure case */
230 *orig
= m_prepend_2(*orig
, len
, how
);
232 return (*orig
== NULL
) ? ENOMEM
: 0;
235 errno_t
mbuf_split(mbuf_t src
, size_t offset
,
236 mbuf_how_t how
, mbuf_t
*new_mbuf
)
238 /* Must set *new_mbuf to NULL in failure case */
239 *new_mbuf
= m_split(src
, offset
, how
);
241 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
244 errno_t
mbuf_pullup(mbuf_t
*mbuf
, size_t len
)
246 /* Must set *mbuf to NULL in failure case */
247 *mbuf
= m_pullup(*mbuf
, len
);
249 return (*mbuf
== NULL
) ? ENOMEM
: 0;
252 errno_t
mbuf_pulldown(mbuf_t src
, size_t *offset
, size_t len
, mbuf_t
*location
)
254 /* Must set *location to NULL in failure case */
256 *location
= m_pulldown(src
, *offset
, len
, &new_offset
);
257 *offset
= new_offset
;
259 return (*location
== NULL
) ? ENOMEM
: 0;
262 void mbuf_adj(mbuf_t mbuf
, int len
)
267 errno_t
mbuf_copydata(mbuf_t m
, size_t off
, size_t len
, void* out_data
)
269 /* Copied m_copydata, added error handling (don't just panic) */
275 if (off
< (size_t)m
->m_len
)
283 count
= m
->m_len
- off
> len
? len
: m
->m_len
- off
;
284 bcopy(mtod(m
, caddr_t
) + off
, out_data
, count
);
286 out_data
= ((char*)out_data
) + count
;
294 int mbuf_mclref(mbuf_t mbuf
)
296 return m_mclref(mbuf
);
299 int mbuf_mclunref(mbuf_t mbuf
)
301 return m_mclunref(mbuf
);
304 int mbuf_mclhasreference(mbuf_t mbuf
)
306 if ((mbuf
->m_flags
& M_EXT
))
307 return m_mclhasreference(mbuf
);
314 mbuf_t
mbuf_next(mbuf_t mbuf
)
319 errno_t
mbuf_setnext(mbuf_t mbuf
, mbuf_t next
)
321 if (next
&& ((next
)->m_nextpkt
!= NULL
||
322 (next
)->m_type
== MT_FREE
)) return EINVAL
;
328 mbuf_t
mbuf_nextpkt(mbuf_t mbuf
)
330 return mbuf
->m_nextpkt
;
333 void mbuf_setnextpkt(mbuf_t mbuf
, mbuf_t nextpkt
)
335 mbuf
->m_nextpkt
= nextpkt
;
338 size_t mbuf_len(mbuf_t mbuf
)
343 void mbuf_setlen(mbuf_t mbuf
, size_t len
)
348 size_t mbuf_maxlen(mbuf_t mbuf
)
350 if (mbuf
->m_flags
& M_EXT
)
351 return mbuf
->m_ext
.ext_size
;
352 return &mbuf
->m_dat
[MLEN
] - ((char*)mbuf_datastart(mbuf
));
355 mbuf_type_t
mbuf_type(mbuf_t mbuf
)
360 errno_t
mbuf_settype(mbuf_t mbuf
, mbuf_type_t new_type
)
362 if (new_type
== MBUF_TYPE_FREE
) return EINVAL
;
364 m_mchtype(mbuf
, new_type
);
369 mbuf_flags_t
mbuf_flags(mbuf_t mbuf
)
371 return mbuf
->m_flags
& mbuf_flags_mask
;
374 errno_t
mbuf_setflags(mbuf_t mbuf
, mbuf_flags_t flags
)
376 if ((flags
& ~mbuf_flags_mask
) != 0) return EINVAL
;
377 mbuf
->m_flags
= flags
|
378 (mbuf
->m_flags
& ~mbuf_flags_mask
);
383 errno_t
mbuf_setflags_mask(mbuf_t mbuf
, mbuf_flags_t flags
, mbuf_flags_t mask
)
385 if (((flags
| mask
) & ~mbuf_flags_mask
) != 0) return EINVAL
;
387 mbuf
->m_flags
= (flags
& mask
) | (mbuf
->m_flags
& ~mask
);
392 errno_t
mbuf_copy_pkthdr(mbuf_t dest
, mbuf_t src
)
394 if (((src
)->m_flags
& M_PKTHDR
) == 0)
397 m_copy_pkthdr(dest
, src
);
402 size_t mbuf_pkthdr_len(mbuf_t mbuf
)
404 return mbuf
->m_pkthdr
.len
;
407 void mbuf_pkthdr_setlen(mbuf_t mbuf
, size_t len
)
409 mbuf
->m_pkthdr
.len
= len
;
412 ifnet_t
mbuf_pkthdr_rcvif(mbuf_t mbuf
)
414 // If we reference count ifnets, we should take a reference here before returning
415 return mbuf
->m_pkthdr
.rcvif
;
418 errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf
, ifnet_t ifnet
)
420 /* May want to walk ifnet list to determine if interface is valid */
421 mbuf
->m_pkthdr
.rcvif
= (struct ifnet
*)ifnet
;
425 void* mbuf_pkthdr_header(mbuf_t mbuf
)
427 return mbuf
->m_pkthdr
.header
;
430 void mbuf_pkthdr_setheader(mbuf_t mbuf
, void *header
)
432 mbuf
->m_pkthdr
.header
= (void*)header
;
436 errno_t
mbuf_aux_add(mbuf_t mbuf
, int family
, mbuf_type_t type
, mbuf_t
*aux_mbuf
)
438 *aux_mbuf
= m_aux_add(mbuf
, family
, type
);
439 return (*aux_mbuf
== NULL
) ? ENOMEM
: 0;
442 mbuf_t
mbuf_aux_find(mbuf_t mbuf
, int family
, mbuf_type_t type
)
444 return m_aux_find(mbuf
, family
, type
);
447 void mbuf_aux_delete(mbuf_t mbuf
, mbuf_t aux
)
449 m_aux_delete(mbuf
, aux
);
453 mbuf_inbound_modified(mbuf_t mbuf
)
455 /* Invalidate hardware generated checksum flags */
456 mbuf
->m_pkthdr
.csum_flags
= 0;
459 extern void in_cksum_offset(struct mbuf
* m
, size_t ip_offset
);
460 extern void in_delayed_cksum_offset(struct mbuf
*m
, int ip_offset
);
463 mbuf_outbound_finalize(mbuf_t mbuf
, u_long protocol_family
, size_t protocol_offset
)
465 if ((mbuf
->m_pkthdr
.csum_flags
& (CSUM_DELAY_DATA
| CSUM_DELAY_IP
)) == 0)
468 /* Generate the packet in software, client needs it */
469 switch (protocol_family
) {
471 if (mbuf
->m_pkthdr
.csum_flags
& CSUM_DELAY_DATA
) {
472 in_delayed_cksum_offset(mbuf
, protocol_offset
);
475 if (mbuf
->m_pkthdr
.csum_flags
& CSUM_DELAY_IP
) {
476 in_cksum_offset(mbuf
, protocol_offset
);
479 mbuf
->m_pkthdr
.csum_flags
&= ~(CSUM_DELAY_DATA
| CSUM_DELAY_IP
);
484 * Not sure what to do here if anything.
485 * Hardware checksum code looked pretty IPv4 specific.
487 if ((mbuf
->m_pkthdr
.csum_flags
& (CSUM_DELAY_DATA
| CSUM_DELAY_IP
)) != 0)
488 panic("mbuf_outbound_finalize - CSUM flags set for non-IPv4 packet (%d)!\n", protocol_family
);
497 mbuf
->m_pkthdr
.csum_flags
|= CSUM_VLAN_TAG_VALID
;
498 mbuf
->m_pkthdr
.vlan_tag
= vlan
;
508 if ((mbuf
->m_pkthdr
.csum_flags
& CSUM_VLAN_TAG_VALID
) == 0)
509 return ENXIO
; // No vlan tag set
511 *vlan
= mbuf
->m_pkthdr
.vlan_tag
;
520 mbuf
->m_pkthdr
.csum_flags
&= ~CSUM_VLAN_TAG_VALID
;
521 mbuf
->m_pkthdr
.vlan_tag
= 0;
526 static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags
=
527 MBUF_CSUM_REQ_IP
| MBUF_CSUM_REQ_TCP
| MBUF_CSUM_REQ_UDP
| MBUF_CSUM_REQ_SUM16
;
530 mbuf_set_csum_requested(
532 mbuf_csum_request_flags_t request
,
535 request
&= mbuf_valid_csum_request_flags
;
536 mbuf
->m_pkthdr
.csum_flags
= (mbuf
->m_pkthdr
.csum_flags
& 0xffff0000) | request
;
537 mbuf
->m_pkthdr
.csum_data
= value
;
543 mbuf_get_csum_requested(
545 mbuf_csum_request_flags_t
*request
,
548 *request
= mbuf
->m_pkthdr
.csum_flags
;
549 *request
&= mbuf_valid_csum_request_flags
;
551 *value
= mbuf
->m_pkthdr
.csum_data
;
558 mbuf_clear_csum_requested(
561 mbuf
->m_pkthdr
.csum_flags
&= 0xffff0000;
562 mbuf
->m_pkthdr
.csum_data
= 0;
567 static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags
=
568 MBUF_CSUM_DID_IP
| MBUF_CSUM_IP_GOOD
| MBUF_CSUM_DID_DATA
|
569 MBUF_CSUM_PSEUDO_HDR
| MBUF_CSUM_TCP_SUM16
;
572 mbuf_set_csum_performed(
574 mbuf_csum_performed_flags_t performed
,
577 performed
&= mbuf_valid_csum_performed_flags
;
578 mbuf
->m_pkthdr
.csum_flags
= (mbuf
->m_pkthdr
.csum_flags
& 0xffff0000) | performed
;
579 mbuf
->m_pkthdr
.csum_data
= value
;
585 mbuf_get_csum_performed(
587 mbuf_csum_performed_flags_t
*performed
,
590 *performed
= mbuf
->m_pkthdr
.csum_flags
& mbuf_valid_csum_performed_flags
;
591 *value
= mbuf
->m_pkthdr
.csum_data
;
597 mbuf_clear_csum_performed(
600 mbuf
->m_pkthdr
.csum_flags
&= 0xffff0000;
601 mbuf
->m_pkthdr
.csum_data
= 0;
610 struct mbuf_tag_id_entry
{
611 SLIST_ENTRY(mbuf_tag_id_entry
) next
;
616 #define MBUF_TAG_ID_ENTRY_SIZE(__str) \
617 ((size_t)&(((struct mbuf_tag_id_entry*)0)->string[0]) + \
620 #define MTAG_FIRST_ID 1000
621 static u_long mtag_id_next
= MTAG_FIRST_ID
;
622 static SLIST_HEAD(,mbuf_tag_id_entry
) mtag_id_list
= {NULL
};
623 static lck_mtx_t
*mtag_id_lock
= NULL
;
625 __private_extern__
void
626 mbuf_tag_id_first_last(
630 *first
= MTAG_FIRST_ID
;
631 *last
= mtag_id_next
- 1;
634 __private_extern__ errno_t
635 mbuf_tag_id_find_internal(
640 struct mbuf_tag_id_entry
*entry
= NULL
;
645 if (string
== NULL
|| out_id
== NULL
) {
649 /* Don't bother allocating the lock if we're only doing a lookup */
650 if (create
== 0 && mtag_id_lock
== NULL
)
653 /* Allocate lock if necessary */
654 if (mtag_id_lock
== NULL
) {
655 lck_grp_attr_t
*grp_attrib
= NULL
;
656 lck_attr_t
*lck_attrb
= NULL
;
657 lck_grp_t
*lck_group
= NULL
;
658 lck_mtx_t
*new_lock
= NULL
;
660 grp_attrib
= lck_grp_attr_alloc_init();
661 lck_grp_attr_setdefault(grp_attrib
);
662 lck_group
= lck_grp_alloc_init("mbuf_tag_allocate_id", grp_attrib
);
663 lck_grp_attr_free(grp_attrib
);
664 lck_attrb
= lck_attr_alloc_init();
665 lck_attr_setdefault(lck_attrb
);
666 lck_attr_setdebug(lck_attrb
);
667 new_lock
= lck_mtx_alloc_init(lck_group
, lck_attrb
);
668 if (!OSCompareAndSwap((UInt32
)0, (UInt32
)new_lock
, (UInt32
*)&mtag_id_lock
)) {
670 * If the atomic swap fails, someone else has already
671 * done this work. We can free the stuff we allocated.
673 lck_mtx_free(new_lock
, lck_group
);
674 lck_grp_free(lck_group
);
676 lck_attr_free(lck_attrb
);
679 /* Look for an existing entry */
680 lck_mtx_lock(mtag_id_lock
);
681 SLIST_FOREACH(entry
, &mtag_id_list
, next
) {
682 if (strcmp(string
, entry
->string
) == 0) {
689 lck_mtx_unlock(mtag_id_lock
);
693 entry
= kalloc(MBUF_TAG_ID_ENTRY_SIZE(string
));
695 lck_mtx_unlock(mtag_id_lock
);
699 strcpy(entry
->string
, string
);
700 entry
->id
= mtag_id_next
;
702 SLIST_INSERT_HEAD(&mtag_id_list
, entry
, next
);
704 lck_mtx_unlock(mtag_id_lock
);
714 mbuf_tag_id_t
*out_id
)
716 return mbuf_tag_id_find_internal(string
, (u_long
*)out_id
, 1);
723 mbuf_tag_type_t type
,
733 /* Sanity check parameters */
734 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 || id
< MTAG_FIRST_ID
||
735 id
>= mtag_id_next
|| length
< 1 || (length
& 0xffff0000) != 0 ||
740 /* Make sure this mtag hasn't already been allocated */
741 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
746 /* Allocate an mtag */
747 tag
= m_tag_alloc(id
, type
, length
, how
);
749 return how
== M_WAITOK
? ENOMEM
: EWOULDBLOCK
;
752 /* Attach the mtag and set *data_p */
753 m_tag_prepend(mbuf
, tag
);
763 mbuf_tag_type_t type
,
774 /* Sanity check parameters */
775 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 || id
< MTAG_FIRST_ID
||
776 id
>= mtag_id_next
|| length
== NULL
|| data_p
== NULL
) {
781 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
786 /* Copy out the pointer to the data and the length value */
787 *length
= tag
->m_tag_len
;
797 mbuf_tag_type_t type
)
801 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 || id
< MTAG_FIRST_ID
||
805 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
810 m_tag_delete(mbuf
, tag
);
815 void mbuf_stats(struct mbuf_stat
*stats
)
817 stats
->mbufs
= mbstat
.m_mbufs
;
818 stats
->clusters
= mbstat
.m_clusters
;
819 stats
->clfree
= mbstat
.m_clfree
;
820 stats
->drops
= mbstat
.m_drops
;
821 stats
->wait
= mbstat
.m_wait
;
822 stats
->drain
= mbstat
.m_drain
;
823 __builtin_memcpy(stats
->mtypes
, mbstat
.m_mtypes
, sizeof(stats
->mtypes
));
824 stats
->mcfail
= mbstat
.m_mcfail
;
825 stats
->mpfail
= mbstat
.m_mpfail
;
826 stats
->msize
= mbstat
.m_msize
;
827 stats
->mclbytes
= mbstat
.m_mclbytes
;
828 stats
->minclsize
= mbstat
.m_minclsize
;
829 stats
->mlen
= mbstat
.m_mlen
;
830 stats
->mhlen
= mbstat
.m_mhlen
;
831 stats
->bigclusters
= mbstat
.m_bigclusters
;
832 stats
->bigclfree
= mbstat
.m_bigclfree
;
833 stats
->bigmclbytes
= mbstat
.m_bigmclbytes
;
837 mbuf_allocpacket(mbuf_how_t how
, size_t packetlen
, unsigned int *maxchunks
, mbuf_t
*mbuf
)
841 unsigned int numpkts
= 1;
842 unsigned int numchunks
= maxchunks
? *maxchunks
: 0;
844 if (packetlen
== 0) {
848 m
= m_allocpacket_internal(&numpkts
, packetlen
, maxchunks
? &numchunks
: NULL
, how
, 1, 0);
850 if (maxchunks
&& *maxchunks
&& numchunks
> *maxchunks
)
864 * mbuf_copyback differs from m_copyback in a few ways:
865 * 1) mbuf_copyback will allocate clusters for new mbufs we append
866 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
867 * 3) mbuf_copyback reports whether or not the operation succeeded
868 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
883 const char *cp
= data
;
885 if (m
== NULL
|| len
== 0 || data
== NULL
)
888 while (off
> (mlen
= m
->m_len
)) {
891 if (m
->m_next
== 0) {
892 n
= m_getclr(how
, m
->m_type
);
897 n
->m_len
= MIN(MLEN
, len
+ off
);
904 mlen
= MIN(m
->m_len
- off
, len
);
905 if (mlen
< len
&& m
->m_next
== NULL
&& mbuf_trailingspace(m
) > 0) {
906 size_t grow
= MIN(mbuf_trailingspace(m
), len
- mlen
);
910 bcopy(cp
, off
+ (char*)mbuf_data(m
), (unsigned)mlen
);
918 if (m
->m_next
== 0) {
919 n
= m_get(how
, m
->m_type
);
924 if (len
> MINCLSIZE
) {
925 /* cluster allocation failure is okay, we can grow chain */
926 mbuf_mclget(how
, m
->m_type
, &n
);
928 n
->m_len
= MIN(mbuf_maxlen(n
), len
);
935 if ((m_start
->m_flags
& M_PKTHDR
) && (m_start
->m_pkthdr
.len
< totlen
))
936 m_start
->m_pkthdr
.len
= totlen
;