2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
24 //#include <sys/kpi_interface.h>
26 #include <sys/param.h>
28 #include <sys/socket.h>
29 #include <kern/debug.h>
30 #include <libkern/OSAtomic.h>
31 #include <kern/kalloc.h>
/*
 * Forward declarations for the mbuf tag-id helpers defined later in this
 * file (see mbuf_tag_id_first_last / mbuf_tag_id_find_internal below).
 */
void mbuf_tag_id_first_last(u_long *first, u_long *last);
errno_t mbuf_tag_id_find_internal(const char *string, u_long *out_id, int create);
/*
 * Flag bits that the mbuf KPI exposes to clients. mbuf_flags(),
 * mbuf_setflags(), and mbuf_setflags_mask() filter through this mask so
 * internal-only m_flags bits are never read or written via the KPI.
 */
static const mbuf_flags_t mbuf_flags_mask = MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
	MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
	MBUF_LASTFRAG | MBUF_PROMISC;
41 void* mbuf_data(mbuf_t mbuf
)
/*
 * mbuf_datastart - return the start of the mbuf's data storage area:
 * the external cluster buffer for M_EXT mbufs, otherwise the inline
 * packet-data area for M_PKTHDR mbufs.
 * NOTE(review): the opening brace and the final fall-through return (for
 * plain mbufs, presumably m_dat) are elided in this extraction.
 */
void* mbuf_datastart(mbuf_t mbuf)
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_buf;
	if (mbuf->m_flags & M_PKTHDR)
		return mbuf->m_pktdat;
/*
 * mbuf_setdata - point the mbuf's data at [data, data+len), validating
 * that the range lies entirely within the mbuf's own storage
 * (datastart .. datastart + maxlen).
 * NOTE(review): the body of the range-check failure branch and the
 * assignment/success-return lines are elided in this extraction.
 */
errno_t
mbuf_setdata(mbuf_t mbuf, void* data, size_t len)
	size_t start = (size_t)((char*)mbuf_datastart(mbuf));
	size_t maxlen = mbuf_maxlen(mbuf);

	/* Reject ranges outside the mbuf's backing storage. */
	if ((size_t)data < start || ((size_t)data) + len > start + maxlen)
/*
 * mbuf_align_32 - reset the data pointer to the start of storage, then
 * advance it so that len bytes of trailing data end on a 32-bit boundary.
 * Refuses to touch a cluster mbuf whose cluster is shared (moving m_data
 * would corrupt the other referencing chain).
 * NOTE(review): the error-return for the shared-cluster case and the
 * final success return are elided in this extraction.
 */
errno_t
mbuf_align_32(mbuf_t mbuf, size_t len)
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf))
	mbuf->m_data = mbuf_datastart(mbuf);
	/* Round the usable offset down to a u_int32_t boundary. */
	mbuf->m_data += ((mbuf_trailingspace(mbuf) - len) &~ (sizeof(u_int32_t) - 1));
78 addr64_t
mbuf_data_to_physical(void* ptr
)
80 return (addr64_t
)mcl_to_paddr(ptr
);
83 errno_t
mbuf_get(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
85 /* Must set *mbuf to NULL in failure case */
86 *mbuf
= m_get(how
, type
);
88 return (*mbuf
== NULL
) ? ENOMEM
: 0;
91 errno_t
mbuf_gethdr(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
93 /* Must set *mbuf to NULL in failure case */
94 *mbuf
= m_gethdr(how
, type
);
96 return (*mbuf
== NULL
) ? ENOMEM
: 0;
/*
 * Not declared in any visible header: internal allocator that attaches a
 * big (NBPG-sized) cluster to m; used by mbuf_getcluster below.
 */
extern struct mbuf *m_mbigget(struct mbuf *m, int nowait);
/*
 * mbuf_getcluster - return an mbuf backed by an external cluster of the
 * requested size; only MCLBYTES and NBPG sizes are handled here.
 * NOTE(review): the declarations of `created` and `error`, the
 * NULL-argument checks, the unsupported-size branch, and the
 * error-unwind body are all elided in this extraction.
 */
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t* mbuf)
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_get(how, type);
	/*
	 * At the time this code was written, m_mclget and m_mbigget would always
	 * return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == NBPG) {
		*mbuf = m_mbigget(*mbuf, how);
	/* Success requires both an mbuf and an attached external cluster. */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
	/* Undo the allocation we made above if attaching the cluster failed. */
	if (created && error != 0) {
/*
 * mbuf_mclget - attach a regular (MCLBYTES) cluster to *mbuf, allocating
 * the mbuf itself first if the caller passed *mbuf == NULL.
 * NOTE(review): the declarations of `created`/`error`, the *mbuf == NULL
 * allocation branch, the error-unwind body, and the final returns are
 * elided in this extraction.
 */
errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
	/* Must set *mbuf to NULL in failure case */
	if (mbuf == NULL) return EINVAL;
	error = mbuf_get(how, type, mbuf);
	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);
	/* If we allocated the mbuf here and got no cluster, clean up. */
	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
/*
 * mbuf_getpacket - allocate a packet-header mbuf with an attached
 * cluster via m_getpacket_how.
 * NOTE(review): the failure branch that maps a NULL result to ENOMEM /
 * blocking-mode error and the success return are elided in this
 * extraction.
 */
errno_t
mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_getpacket_how(how);
	/* Distinguish the waiting-allocation failure case. */
	if (how == MBUF_WAITOK)
184 mbuf_t
mbuf_free(mbuf_t mbuf
)
189 void mbuf_freem(mbuf_t mbuf
)
194 int mbuf_freem_list(mbuf_t mbuf
)
196 return m_freem_list(mbuf
);
199 size_t mbuf_leadingspace(mbuf_t mbuf
)
201 return m_leadingspace(mbuf
);
204 size_t mbuf_trailingspace(mbuf_t mbuf
)
206 return m_trailingspace(mbuf
);
210 errno_t
mbuf_copym(mbuf_t src
, size_t offset
, size_t len
,
211 mbuf_how_t how
, mbuf_t
*new_mbuf
)
213 /* Must set *mbuf to NULL in failure case */
214 *new_mbuf
= m_copym(src
, offset
, len
, how
);
216 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
219 errno_t
mbuf_dup(mbuf_t src
, mbuf_how_t how
, mbuf_t
*new_mbuf
)
221 /* Must set *new_mbuf to NULL in failure case */
222 *new_mbuf
= m_dup(src
, how
);
224 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
227 errno_t
mbuf_prepend(mbuf_t
*orig
, size_t len
, mbuf_how_t how
)
229 /* Must set *orig to NULL in failure case */
230 *orig
= m_prepend_2(*orig
, len
, how
);
232 return (*orig
== NULL
) ? ENOMEM
: 0;
235 errno_t
mbuf_split(mbuf_t src
, size_t offset
,
236 mbuf_how_t how
, mbuf_t
*new_mbuf
)
238 /* Must set *new_mbuf to NULL in failure case */
239 *new_mbuf
= m_split(src
, offset
, how
);
241 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
244 errno_t
mbuf_pullup(mbuf_t
*mbuf
, size_t len
)
246 /* Must set *mbuf to NULL in failure case */
247 *mbuf
= m_pullup(*mbuf
, len
);
249 return (*mbuf
== NULL
) ? ENOMEM
: 0;
252 errno_t
mbuf_pulldown(mbuf_t src
, size_t *offset
, size_t len
, mbuf_t
*location
)
254 /* Must set *location to NULL in failure case */
256 *location
= m_pulldown(src
, *offset
, len
, &new_offset
);
257 *offset
= new_offset
;
259 return (*location
== NULL
) ? ENOMEM
: 0;
262 void mbuf_adj(mbuf_t mbuf
, int len
)
/*
 * mbuf_copydata - copy len bytes starting at offset off out of the chain
 * into out_data, walking mbuf by mbuf.
 * NOTE(review): the declarations (`count`), the offset-skipping loop
 * header, the chain-exhausted error returns, and the loop advance /
 * final return are elided in this extraction.
 */
errno_t
mbuf_copydata(mbuf_t m, size_t off, size_t len, void* out_data)
	/* Copied m_copydata, added error handling (don't just panic) */
	if (off < (size_t)m->m_len)
	/* Copy the lesser of what's left in this mbuf and what's requested. */
	count = m->m_len - off > len ? len : m->m_len - off;
	bcopy(mtod(m, caddr_t) + off, out_data, count);
	/* Advance the destination cursor past the bytes just copied. */
	out_data = ((char*)out_data) + count;
294 int mbuf_mclref(mbuf_t mbuf
)
296 return m_mclref(mbuf
);
299 int mbuf_mclunref(mbuf_t mbuf
)
301 return m_mclunref(mbuf
);
304 int mbuf_mclhasreference(mbuf_t mbuf
)
306 if ((mbuf
->m_flags
& M_EXT
))
307 return m_mclhasreference(mbuf
);
314 mbuf_t
mbuf_next(mbuf_t mbuf
)
/*
 * mbuf_setnext - link `next` as the continuation of `mbuf`, rejecting a
 * next mbuf that is already part of a packet queue (m_nextpkt set) or
 * that has been freed (MT_FREE).
 * NOTE(review): the m_next assignment and the success return are elided
 * in this extraction; only the validation is visible.
 */
errno_t
mbuf_setnext(mbuf_t mbuf, mbuf_t next)
	if (next && ((next)->m_nextpkt != NULL ||
	    (next)->m_type == MT_FREE)) return EINVAL;
328 mbuf_t
mbuf_nextpkt(mbuf_t mbuf
)
330 return mbuf
->m_nextpkt
;
333 void mbuf_setnextpkt(mbuf_t mbuf
, mbuf_t nextpkt
)
335 mbuf
->m_nextpkt
= nextpkt
;
338 size_t mbuf_len(mbuf_t mbuf
)
343 void mbuf_setlen(mbuf_t mbuf
, size_t len
)
348 size_t mbuf_maxlen(mbuf_t mbuf
)
350 if (mbuf
->m_flags
& M_EXT
)
351 return mbuf
->m_ext
.ext_size
;
352 return &mbuf
->m_dat
[MLEN
] - ((char*)mbuf_datastart(mbuf
));
355 mbuf_type_t
mbuf_type(mbuf_t mbuf
)
360 errno_t
mbuf_settype(mbuf_t mbuf
, mbuf_type_t new_type
)
362 if (new_type
== MBUF_TYPE_FREE
) return EINVAL
;
364 m_mchtype(mbuf
, new_type
);
369 mbuf_flags_t
mbuf_flags(mbuf_t mbuf
)
371 return mbuf
->m_flags
& mbuf_flags_mask
;
374 errno_t
mbuf_setflags(mbuf_t mbuf
, mbuf_flags_t flags
)
376 if ((flags
& ~mbuf_flags_mask
) != 0) return EINVAL
;
377 mbuf
->m_flags
= flags
|
378 (mbuf
->m_flags
& ~mbuf_flags_mask
);
383 errno_t
mbuf_setflags_mask(mbuf_t mbuf
, mbuf_flags_t flags
, mbuf_flags_t mask
)
385 if (((flags
| mask
) & ~mbuf_flags_mask
) != 0) return EINVAL
;
387 mbuf
->m_flags
= (flags
& mask
) | (mbuf
->m_flags
& ~mask
);
/*
 * mbuf_copy_pkthdr - copy src's packet header into dest via
 * m_copy_pkthdr; src must itself have a packet header.
 * NOTE(review): the body of the no-header error branch and the success
 * return are elided in this extraction.
 */
errno_t
mbuf_copy_pkthdr(mbuf_t dest, mbuf_t src)
	if (((src)->m_flags & M_PKTHDR) == 0)

	m_copy_pkthdr(dest, src);
402 size_t mbuf_pkthdr_len(mbuf_t mbuf
)
404 return mbuf
->m_pkthdr
.len
;
407 void mbuf_pkthdr_setlen(mbuf_t mbuf
, size_t len
)
409 mbuf
->m_pkthdr
.len
= len
;
412 ifnet_t
mbuf_pkthdr_rcvif(mbuf_t mbuf
)
414 // If we reference count ifnets, we should take a reference here before returning
415 return mbuf
->m_pkthdr
.rcvif
;
418 errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf
, ifnet_t ifnet
)
420 /* May want to walk ifnet list to determine if interface is valid */
421 mbuf
->m_pkthdr
.rcvif
= (struct ifnet
*)ifnet
;
425 void* mbuf_pkthdr_header(mbuf_t mbuf
)
427 return mbuf
->m_pkthdr
.header
;
430 void mbuf_pkthdr_setheader(mbuf_t mbuf
, void *header
)
432 mbuf
->m_pkthdr
.header
= (void*)header
;
436 errno_t
mbuf_aux_add(mbuf_t mbuf
, int family
, mbuf_type_t type
, mbuf_t
*aux_mbuf
)
438 *aux_mbuf
= m_aux_add(mbuf
, family
, type
);
439 return (*aux_mbuf
== NULL
) ? ENOMEM
: 0;
442 mbuf_t
mbuf_aux_find(mbuf_t mbuf
, int family
, mbuf_type_t type
)
444 return m_aux_find(mbuf
, family
, type
);
447 void mbuf_aux_delete(mbuf_t mbuf
, mbuf_t aux
)
449 m_aux_delete(mbuf
, aux
);
/*
 * mbuf_inbound_modified - a filter has altered an inbound packet, so any
 * hardware-computed checksum results are no longer trustworthy; clear
 * them all.
 * NOTE(review): the return-type line is elided in this extraction
 * (presumably void) along with the braces.
 */
mbuf_inbound_modified(mbuf_t mbuf)
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
/*
 * Software checksum helpers used by mbuf_outbound_finalize below; each
 * computes a checksum for the packet whose IP header begins at
 * ip_offset bytes into the chain.
 */
extern void in_cksum_offset(struct mbuf* m, size_t ip_offset);
extern void in_delayed_cksum_offset(struct mbuf *m, int ip_offset);
/*
 * mbuf_outbound_finalize - compute in software any checksums that were
 * deferred to hardware (CSUM_DELAY_DATA / CSUM_DELAY_IP / CSUM_TCP_SUM16),
 * so a client receiving the packet sees a fully finalized frame.
 * NOTE(review): the return-type line (presumably void), the switch case
 * labels (IPv4 vs. default), and most braces are elided in this
 * extraction; remaining statements are preserved verbatim.
 */
mbuf_outbound_finalize(mbuf_t mbuf, u_long protocol_family, size_t protocol_offset)
	/* Nothing deferred: nothing to do. */
	if ((mbuf->m_pkthdr.csum_flags &
	    (CSUM_DELAY_DATA | CSUM_DELAY_IP | CSUM_TCP_SUM16)) == 0)

	/* Generate the packet in software, client needs it */
	switch (protocol_family) {
		if (mbuf->m_pkthdr.csum_flags & CSUM_TCP_SUM16) {
			/*
			 * If you're wondering where this lovely code comes
			 * from, we're trying to undo what happens in ip_output.
			 * Look for CSUM_TCP_SUM16 in ip_output.
			 */
			u_int16_t first, second;
			mbuf->m_pkthdr.csum_flags &= ~CSUM_TCP_SUM16;
			mbuf->m_pkthdr.csum_flags |= CSUM_TCP;
			first = mbuf->m_pkthdr.csum_data >> 16;
			second = mbuf->m_pkthdr.csum_data & 0xffff;
			mbuf->m_pkthdr.csum_data = first - second;

		if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
			in_delayed_cksum_offset(mbuf, protocol_offset);

		if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
			in_cksum_offset(mbuf, protocol_offset);

		/* All deferred checksums are now materialized in the packet. */
		mbuf->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DELAY_IP);

		/*
		 * Not sure what to do here if anything.
		 * Hardware checksum code looked pretty IPv4 specific.
		 */
		if ((mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_IP)) != 0)
			panic("mbuf_outbound_finalize - CSUM flags set for non-IPv4 packet (%d)!\n", protocol_family);
511 mbuf
->m_pkthdr
.csum_flags
|= CSUM_VLAN_TAG_VALID
;
512 mbuf
->m_pkthdr
.vlan_tag
= vlan
;
522 if ((mbuf
->m_pkthdr
.csum_flags
& CSUM_VLAN_TAG_VALID
) == 0)
523 return ENXIO
; // No vlan tag set
525 *vlan
= mbuf
->m_pkthdr
.vlan_tag
;
534 mbuf
->m_pkthdr
.csum_flags
&= ~CSUM_VLAN_TAG_VALID
;
535 mbuf
->m_pkthdr
.vlan_tag
= 0;
/*
 * Checksum request/performed KPI. csum_flags packs hardware-offload
 * request bits in the low 16 bits (request side) and result bits in the
 * same low half (performed side); csum_data carries the associated
 * value. The masks below restrict clients to the documented bits.
 * NOTE(review): the mbuf parameter lines, value parameters, braces, and
 * return statements of these six functions are elided in this
 * extraction; remaining tokens are preserved verbatim.
 */
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
	MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP | MBUF_CSUM_REQ_SUM16;

/* Record which checksums the client wants hardware to compute. */
mbuf_set_csum_requested(
	mbuf_csum_request_flags_t request,
	/* Only the documented request bits may be set. */
	request &= mbuf_valid_csum_request_flags;
	/* Replace the low 16 request bits, keep the high bits untouched. */
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

/* Report which checksums were requested, filtered to the documented bits. */
mbuf_get_csum_requested(
	mbuf_csum_request_flags_t *request,
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	*value = mbuf->m_pkthdr.csum_data;

/* Drop all checksum-request state from the packet header. */
mbuf_clear_csum_requested(
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
	MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
	MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_TCP_SUM16;

/* Record which checksums hardware already computed on receive. */
mbuf_set_csum_performed(
	mbuf_csum_performed_flags_t performed,
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

/* Report which checksums hardware performed, plus the checksum value. */
mbuf_get_csum_performed(
	mbuf_csum_performed_flags_t *performed,
	*performed = mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

/* Drop all checksum-performed state from the packet header. */
mbuf_clear_csum_performed(
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;
/*
 * Registry mapping client-supplied strings to numeric mbuf tag ids.
 * Entries live on a singly-linked list protected by mtag_id_lock, which
 * is allocated lazily on first create (see mbuf_tag_id_find_internal).
 * NOTE(review): the id and flexible string fields of the struct and the
 * second line of the size macro are elided in this extraction.
 */
struct mbuf_tag_id_entry {
	SLIST_ENTRY(mbuf_tag_id_entry) next;

/* Size of an entry including its trailing NUL-terminated string. */
#define MBUF_TAG_ID_ENTRY_SIZE(__str) \
	((size_t)&(((struct mbuf_tag_id_entry*)0)->string[0]) + \

/* Tag ids are handed out sequentially starting at MTAG_FIRST_ID. */
#define MTAG_FIRST_ID 1000
static u_long mtag_id_next = MTAG_FIRST_ID;
static SLIST_HEAD(,mbuf_tag_id_entry) mtag_id_list = {NULL};
static lck_mtx_t *mtag_id_lock = NULL;
639 __private_extern__
void
640 mbuf_tag_id_first_last(
644 *first
= MTAG_FIRST_ID
;
645 *last
= mtag_id_next
- 1;
/*
 * mbuf_tag_id_find_internal - translate a client string into a numeric
 * tag id, optionally (create != 0) registering a new id when the string
 * is unknown. The registry lock is allocated lazily with a
 * compare-and-swap race between concurrent first callers.
 * NOTE(review): the parameter lines, several early returns (bad args,
 * lookup-miss without create, allocation failure), the found-entry body,
 * and the id-increment line are elided in this extraction; remaining
 * tokens are preserved verbatim.
 */
__private_extern__ errno_t
mbuf_tag_id_find_internal(
	struct mbuf_tag_id_entry *entry = NULL;

	if (string == NULL || out_id == NULL) {

	/* Don't bother allocating the lock if we're only doing a lookup */
	if (create == 0 && mtag_id_lock == NULL)

	/* Allocate lock if necessary */
	if (mtag_id_lock == NULL) {
		lck_grp_attr_t *grp_attrib = NULL;
		lck_attr_t *lck_attrb = NULL;
		lck_grp_t *lck_group = NULL;
		lck_mtx_t *new_lock = NULL;

		grp_attrib = lck_grp_attr_alloc_init();
		lck_grp_attr_setdefault(grp_attrib);
		lck_group = lck_grp_alloc_init("mbuf_tag_allocate_id", grp_attrib);
		lck_grp_attr_free(grp_attrib);
		lck_attrb = lck_attr_alloc_init();
		lck_attr_setdefault(lck_attrb);
		lck_attr_setdebug(lck_attrb);
		new_lock = lck_mtx_alloc_init(lck_group, lck_attrb);
		/* Publish the lock atomically; only one caller can win. */
		if (!OSCompareAndSwap((UInt32)0, (UInt32)new_lock, (UInt32*)&mtag_id_lock)) {
			/*
			 * If the atomic swap fails, someone else has already
			 * done this work. We can free the stuff we allocated.
			 */
			lck_mtx_free(new_lock, lck_group);
			lck_grp_free(lck_group);
		lck_attr_free(lck_attrb);

	/* Look for an existing entry */
	lck_mtx_lock(mtag_id_lock);
	SLIST_FOREACH(entry, &mtag_id_list, next) {
		if (strcmp(string, entry->string) == 0) {
	lck_mtx_unlock(mtag_id_lock);

	/* Not found: register a new entry holding a copy of the string. */
	entry = kalloc(MBUF_TAG_ID_ENTRY_SIZE(string));
	lck_mtx_unlock(mtag_id_lock);
	strcpy(entry->string, string);
	entry->id = mtag_id_next;
	SLIST_INSERT_HEAD(&mtag_id_list, entry, next);
	lck_mtx_unlock(mtag_id_lock);
728 mbuf_tag_id_t
*out_id
)
730 return mbuf_tag_id_find_internal(string
, (u_long
*)out_id
, 1);
737 mbuf_tag_type_t type
,
747 /* Sanity check parameters */
748 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 || id
< MTAG_FIRST_ID
||
749 id
>= mtag_id_next
|| length
< 1 || (length
& 0xffff0000) != 0 ||
754 /* Make sure this mtag hasn't already been allocated */
755 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
760 /* Allocate an mtag */
761 tag
= m_tag_alloc(id
, type
, length
, how
);
763 return how
== M_WAITOK
? ENOMEM
: EWOULDBLOCK
;
766 /* Attach the mtag and set *data_p */
767 m_tag_prepend(mbuf
, tag
);
777 mbuf_tag_type_t type
,
788 /* Sanity check parameters */
789 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 || id
< MTAG_FIRST_ID
||
790 id
>= mtag_id_next
|| length
== NULL
|| data_p
== NULL
) {
795 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
/* Copy out the pointer to the data and the length value */
801 *length
= tag
->m_tag_len
;
811 mbuf_tag_type_t type
)
815 if (mbuf
== NULL
|| (mbuf
->m_flags
& M_PKTHDR
) == 0 || id
< MTAG_FIRST_ID
||
819 tag
= m_tag_locate(mbuf
, id
, type
, NULL
);
824 m_tag_delete(mbuf
, tag
);
/*
 * mbuf_stats - snapshot the global mbstat counters into the caller's
 * mbuf_stat structure, field by field (no locking is taken here; values
 * are copied as-is).
 * NOTE(review): the surrounding braces are elided in this extraction.
 */
void mbuf_stats(struct mbuf_stat *stats)
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	/* Per-type allocation counts are copied as one array. */
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
/*
 * mbuf_allocpacket - allocate a single packet of packetlen bytes using
 * at most *maxchunks cluster segments (0 / NULL means no limit);
 * m_allocpacket_internal reports back how many chunks it actually used.
 * NOTE(review): the return-type line (presumably errno_t), the local
 * declarations of `m`/error, the zero-length error branch, and the
 * failure/too-many-chunks handling bodies are elided in this
 * extraction.
 */
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {

	m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);
	/* Caller's chunk budget was exceeded by the allocator. */
	if (maxchunks && *maxchunks && numchunks > *maxchunks)
878 * mbuf_copyback differs from m_copyback in a few ways:
879 * 1) mbuf_copyback will allocate clusters for new mbufs we append
880 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
881 * 3) mbuf_copyback reports whether or not the operation succeeded
882 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
897 const char *cp
= data
;
899 if (m
== NULL
|| len
== 0 || data
== NULL
)
902 while (off
> (mlen
= m
->m_len
)) {
905 if (m
->m_next
== 0) {
906 n
= m_getclr(how
, m
->m_type
);
911 n
->m_len
= MIN(MLEN
, len
+ off
);
918 mlen
= MIN(m
->m_len
- off
, len
);
919 if (mlen
< len
&& m
->m_next
== NULL
&& mbuf_trailingspace(m
) > 0) {
920 size_t grow
= MIN(mbuf_trailingspace(m
), len
- mlen
);
924 bcopy(cp
, off
+ (char*)mbuf_data(m
), (unsigned)mlen
);
932 if (m
->m_next
== 0) {
933 n
= m_get(how
, m
->m_type
);
938 if (len
> MINCLSIZE
) {
/* cluster allocation failure is okay, we can grow chain */
940 mbuf_mclget(how
, m
->m_type
, &n
);
942 n
->m_len
= MIN(mbuf_maxlen(n
), len
);
949 if ((m_start
->m_flags
& M_PKTHDR
) && (m_start
->m_pkthdr
.len
< totlen
))
950 m_start
->m_pkthdr
.len
= totlen
;