2 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
32 //#include <sys/kpi_interface.h>
34 #include <sys/param.h>
36 #include <sys/socket.h>
37 #include <kern/debug.h>
38 #include <libkern/OSAtomic.h>
39 #include <kern/kalloc.h>
/* Forward declarations for the mbuf tag-id registry implemented later in
 * this file; mbuf_tag_id_find_internal does the real lookup/registration. */
void mbuf_tag_id_first_last(u_long *first, u_long *last);
errno_t mbuf_tag_id_find_internal(const char *string, u_long *out_id, int create);
45 static const mbuf_flags_t mbuf_flags_mask
= MBUF_EXT
| MBUF_PKTHDR
| MBUF_EOR
|
46 MBUF_BCAST
| MBUF_MCAST
| MBUF_FRAG
| MBUF_FIRSTFRAG
|
47 MBUF_LASTFRAG
| MBUF_PROMISC
;
/*
 * Return a pointer to the start of valid data in this mbuf.
 * NOTE(review): extraction gap — the body (presumably
 * "return mbuf->m_data;") is missing from this view; confirm against
 * the full source.
 */
void* mbuf_data(mbuf_t mbuf)
/*
 * Return the start of this mbuf's backing buffer: the external cluster
 * buffer when M_EXT is set, the packet-header data area when M_PKTHDR
 * is set, otherwise (fall-through not visible here) the inline area.
 * NOTE(review): extraction gap — the opening/closing braces and the
 * final return for the plain-mbuf case are missing from this view.
 */
void* mbuf_datastart(mbuf_t mbuf)
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_buf;
	if (mbuf->m_flags & M_PKTHDR)
		return mbuf->m_pktdat;
/*
 * Set the mbuf's valid-data pointer and length after validating that
 * [data, data + len) lies inside this mbuf's own buffer
 * (mbuf_datastart .. mbuf_datastart + mbuf_maxlen).
 * NOTE(review): extraction gaps — braces, the error return taken when
 * the range check fails, the assignments that actually store data/len,
 * and the success return are not visible here.
 */
errno_t mbuf_setdata(mbuf_t mbuf, void* data, size_t len)
	size_t start = (size_t)((char*)mbuf_datastart(mbuf));
	size_t maxlen = mbuf_maxlen(mbuf);

	/* reject ranges that fall outside the backing buffer */
	if ((size_t)data < start || ((size_t)data) + len > start + maxlen)
/*
 * Position m_data so that a payload of 'len' bytes ends 32-bit
 * aligned: reset m_data to the buffer start, then advance it by the
 * trailing space rounded down to a multiple of sizeof(u_int32_t).
 * NOTE(review): extraction gaps — braces, the error return taken when
 * the cluster is shared (and therefore must not be rewritten), and the
 * final return are not visible here.
 */
errno_t mbuf_align_32(mbuf_t mbuf, size_t len)
	/* a cluster referenced by another mbuf must not be modified */
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf))

	mbuf->m_data = mbuf_datastart(mbuf);
	/* (trailingspace - len) rounded down to a 4-byte multiple */
	mbuf->m_data += ((mbuf_trailingspace(mbuf) - len) &~ (sizeof(u_int32_t) - 1));
86 addr64_t
mbuf_data_to_physical(void* ptr
)
88 return (addr64_t
)mcl_to_paddr(ptr
);
91 errno_t
mbuf_get(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
93 /* Must set *mbuf to NULL in failure case */
94 *mbuf
= m_get(how
, type
);
96 return (*mbuf
== NULL
) ? ENOMEM
: 0;
99 errno_t
mbuf_gethdr(mbuf_how_t how
, mbuf_type_t type
, mbuf_t
*mbuf
)
101 /* Must set *mbuf to NULL in failure case */
102 *mbuf
= m_gethdr(how
, type
);
104 return (*mbuf
== NULL
) ? ENOMEM
: 0;
107 extern struct mbuf
* m_mbigget(struct mbuf
*m
, int nowait
);
/*
 * Allocate an mbuf backed by a cluster of the requested size:
 * MCLBYTES via m_mclget, NBPG (big cluster) via m_mbigget.
 * NOTE(review): extraction gaps — parameter validation, the
 * 'created'/'error' bookkeeping, the EINVAL path for unsupported
 * sizes, cleanup of a freshly created mbuf on failure, and the final
 * returns are not visible here.
 */
errno_t mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t* mbuf)
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_get(how, type);

	/*
	 * At the time this code was written, m_mclget and m_mbigget would always
	 * return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == NBPG) {
		*mbuf = m_mbigget(*mbuf, how);

	/* no cluster attached means the allocation failed */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)

	/* free the mbuf we created here if the cluster could not be added */
	if (created && error != 0) {
/*
 * Attach a regular (MCLBYTES) cluster to *mbuf, first allocating the
 * mbuf via mbuf_get when the caller passed *mbuf == NULL.
 * NOTE(review): extraction gaps — the 'created'/'error' bookkeeping,
 * braces, cleanup of a freshly allocated mbuf when the cluster cannot
 * be attached, and the returns are not visible here.
 */
errno_t mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
	/* Must set *mbuf to NULL in failure case */
	if (mbuf == NULL) return EINVAL;

	error = mbuf_get(how, type, mbuf);

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	/* undo the allocation we made if no cluster was attached */
	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {

	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
/*
 * Allocate a packet-header mbuf with a cluster already attached.
 * NOTE(review): extraction gaps — braces and the error-mapping code
 * (the MBUF_WAITOK test below presumably selects ENOMEM vs. a
 * would-block error) are not visible here.
 */
errno_t mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_getpacket_how(how);

	if (how == MBUF_WAITOK)
/*
 * Free a single mbuf; the mbuf_t return suggests the next mbuf in the
 * chain is returned.
 * NOTE(review): extraction gap — the body (presumably
 * "return m_free(mbuf);") is missing from this view.
 */
mbuf_t
mbuf_free(mbuf_t mbuf)
/*
 * Free an mbuf (and, per the usual m_freem convention, its chain —
 * confirm against the full source).
 * NOTE(review): extraction gap — the body is missing from this view.
 */
void mbuf_freem(mbuf_t mbuf)
202 int mbuf_freem_list(mbuf_t mbuf
)
204 return m_freem_list(mbuf
);
207 size_t mbuf_leadingspace(mbuf_t mbuf
)
209 return m_leadingspace(mbuf
);
212 size_t mbuf_trailingspace(mbuf_t mbuf
)
214 return m_trailingspace(mbuf
);
218 errno_t
mbuf_copym(mbuf_t src
, size_t offset
, size_t len
,
219 mbuf_how_t how
, mbuf_t
*new_mbuf
)
221 /* Must set *mbuf to NULL in failure case */
222 *new_mbuf
= m_copym(src
, offset
, len
, how
);
224 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
227 errno_t
mbuf_dup(mbuf_t src
, mbuf_how_t how
, mbuf_t
*new_mbuf
)
229 /* Must set *new_mbuf to NULL in failure case */
230 *new_mbuf
= m_dup(src
, how
);
232 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
235 errno_t
mbuf_prepend(mbuf_t
*orig
, size_t len
, mbuf_how_t how
)
237 /* Must set *orig to NULL in failure case */
238 *orig
= m_prepend_2(*orig
, len
, how
);
240 return (*orig
== NULL
) ? ENOMEM
: 0;
243 errno_t
mbuf_split(mbuf_t src
, size_t offset
,
244 mbuf_how_t how
, mbuf_t
*new_mbuf
)
246 /* Must set *new_mbuf to NULL in failure case */
247 *new_mbuf
= m_split(src
, offset
, how
);
249 return (*new_mbuf
== NULL
) ? ENOMEM
: 0;
252 errno_t
mbuf_pullup(mbuf_t
*mbuf
, size_t len
)
254 /* Must set *mbuf to NULL in failure case */
255 *mbuf
= m_pullup(*mbuf
, len
);
257 return (*mbuf
== NULL
) ? ENOMEM
: 0;
/*
 * Make 'len' bytes at *offset contiguous; *location receives the mbuf
 * holding them and *offset is rewritten relative to that mbuf.
 * NOTE(review): extraction gap — braces and the declaration of
 * 'new_offset' (its exact type is not visible) are missing from this
 * view.
 */
errno_t mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
	/* Must set *location to NULL in failure case */
	*location = m_pulldown(src, *offset, len, &new_offset);
	*offset = new_offset;

	return (*location == NULL) ? ENOMEM : 0;
/*
 * Trim 'len' bytes from the chain (per the classic m_adj contract:
 * head for positive len, tail for negative — confirm).
 * NOTE(review): extraction gap — the body is missing from this view.
 */
void mbuf_adj(mbuf_t mbuf, int len)
/*
 * Copy 'len' bytes starting at chain offset 'off' into out_data.
 * NOTE(review): extraction gaps — the loop structure walking the
 * chain, the EINVAL checks for a short chain, the off/len updates,
 * the m = m->m_next advance and the returns are not visible here.
 */
errno_t mbuf_copydata(mbuf_t m, size_t off, size_t len, void* out_data)
	/* Copied m_copydata, added error handling (don't just panic) */
		if (off < (size_t)m->m_len)
		/* copy the smaller of the remaining request and this mbuf's data */
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		out_data = ((char*)out_data) + count;
302 int mbuf_mclref(mbuf_t mbuf
)
304 return m_mclref(mbuf
);
307 int mbuf_mclunref(mbuf_t mbuf
)
309 return m_mclunref(mbuf
);
/*
 * Nonzero when this mbuf has a cluster that is referenced elsewhere.
 * NOTE(review): extraction gap — braces and the return taken when the
 * mbuf has no cluster (presumably "return 0;") are not visible here.
 */
int mbuf_mclhasreference(mbuf_t mbuf)
	if ((mbuf->m_flags & M_EXT))
		return m_mclhasreference(mbuf);
/*
 * Next mbuf in this packet's chain.
 * NOTE(review): extraction gap — the body (presumably
 * "return mbuf->m_next;") is missing from this view.
 */
mbuf_t
mbuf_next(mbuf_t mbuf)
/*
 * Link 'next' after 'mbuf'. Rejects a 'next' that is already part of
 * a packet queue (m_nextpkt set) or that has been freed (MT_FREE).
 * NOTE(review): extraction gap — braces, the assignment
 * mbuf->m_next = next, and the success return are not visible here.
 */
errno_t
mbuf_setnext(mbuf_t mbuf, mbuf_t next)
	if (next && ((next)->m_nextpkt != NULL ||
	    (next)->m_type == MT_FREE)) return EINVAL;
336 mbuf_t
mbuf_nextpkt(mbuf_t mbuf
)
338 return mbuf
->m_nextpkt
;
341 void mbuf_setnextpkt(mbuf_t mbuf
, mbuf_t nextpkt
)
343 mbuf
->m_nextpkt
= nextpkt
;
/*
 * Length of valid data in this mbuf.
 * NOTE(review): extraction gap — the body (presumably
 * "return mbuf->m_len;") is missing from this view.
 */
size_t mbuf_len(mbuf_t mbuf)
/*
 * Set the length of valid data in this mbuf.
 * NOTE(review): extraction gap — the body (presumably
 * "mbuf->m_len = len;") is missing from this view.
 */
void mbuf_setlen(mbuf_t mbuf, size_t len)
356 size_t mbuf_maxlen(mbuf_t mbuf
)
358 if (mbuf
->m_flags
& M_EXT
)
359 return mbuf
->m_ext
.ext_size
;
360 return &mbuf
->m_dat
[MLEN
] - ((char*)mbuf_datastart(mbuf
));
/*
 * Type of this mbuf.
 * NOTE(review): extraction gap — the body (presumably
 * "return mbuf->m_type;") is missing from this view.
 */
mbuf_type_t
mbuf_type(mbuf_t mbuf)
/*
 * Change the mbuf's type via m_mchtype. MBUF_TYPE_FREE is reserved
 * for the allocator and is rejected.
 * NOTE(review): extraction gap — braces and the success return are
 * not visible here.
 */
errno_t
mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
	if (new_type == MBUF_TYPE_FREE) return EINVAL;

	m_mchtype(mbuf, new_type);
377 mbuf_flags_t
mbuf_flags(mbuf_t mbuf
)
379 return mbuf
->m_flags
& mbuf_flags_mask
;
/*
 * Replace the caller-visible flag bits (mbuf_flags_mask) while
 * preserving the kernel-private bits. Rejects flags outside the
 * exported mask.
 * NOTE(review): extraction gap — braces and the success return are
 * not visible here.
 */
errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
	if ((flags & ~mbuf_flags_mask) != 0) return EINVAL;
	mbuf->m_flags = flags |
	    (mbuf->m_flags & ~mbuf_flags_mask);
/*
 * Update only the flag bits selected by 'mask', leaving all other
 * bits untouched. Both flags and mask must stay within the exported
 * mbuf_flags_mask.
 * NOTE(review): extraction gap — braces and the success return are
 * not visible here.
 */
errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
	if (((flags | mask) & ~mbuf_flags_mask) != 0) return EINVAL;
	mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
/*
 * Copy the packet header from src to dest via m_copy_pkthdr; src
 * must actually carry a packet header (M_PKTHDR).
 * NOTE(review): extraction gap — braces, the error return for the
 * missing-header case, and the success return are not visible here.
 */
errno_t
mbuf_copy_pkthdr(mbuf_t dest, mbuf_t src)
	if (((src)->m_flags & M_PKTHDR) == 0)

	m_copy_pkthdr(dest, src);
410 size_t mbuf_pkthdr_len(mbuf_t mbuf
)
412 return mbuf
->m_pkthdr
.len
;
415 void mbuf_pkthdr_setlen(mbuf_t mbuf
, size_t len
)
417 mbuf
->m_pkthdr
.len
= len
;
420 ifnet_t
mbuf_pkthdr_rcvif(mbuf_t mbuf
)
422 // If we reference count ifnets, we should take a reference here before returning
423 return mbuf
->m_pkthdr
.rcvif
;
/*
 * Record the receive interface in the packet header.
 * NOTE(review): extraction gap — braces and the success return are
 * not visible here.
 */
errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet*)ifnet;
433 void* mbuf_pkthdr_header(mbuf_t mbuf
)
435 return mbuf
->m_pkthdr
.header
;
438 void mbuf_pkthdr_setheader(mbuf_t mbuf
, void *header
)
440 mbuf
->m_pkthdr
.header
= (void*)header
;
444 errno_t
mbuf_aux_add(mbuf_t mbuf
, int family
, mbuf_type_t type
, mbuf_t
*aux_mbuf
)
446 *aux_mbuf
= m_aux_add(mbuf
, family
, type
);
447 return (*aux_mbuf
== NULL
) ? ENOMEM
: 0;
450 mbuf_t
mbuf_aux_find(mbuf_t mbuf
, int family
, mbuf_type_t type
)
452 return m_aux_find(mbuf
, family
, type
);
455 void mbuf_aux_delete(mbuf_t mbuf
, mbuf_t aux
)
457 m_aux_delete(mbuf
, aux
);
/*
 * Called after an inbound packet is modified so that stale
 * hardware-offload checksum results are no longer trusted.
 * NOTE(review): extraction gap — the return-type line (presumably
 * "void") and braces are not visible here.
 */
mbuf_inbound_modified(mbuf_t mbuf)
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
467 extern void in_cksum_offset(struct mbuf
* m
, size_t ip_offset
);
468 extern void in_delayed_cksum_offset(struct mbuf
*m
, int ip_offset
);
/*
 * Complete in software any checksums that were deferred to hardware,
 * so that a client (e.g. a filter or bpf) sees the final packet bytes.
 * NOTE(review): extraction gaps — the return-type line, braces, the
 * early return when no delayed-checksum flags are pending, and the
 * switch case labels (the CSUM_* work below belongs to the IPv4 case;
 * the panic belongs to the default case) are not visible here.
 * NOTE(review): the panic() format uses %d for 'protocol_family',
 * which is declared u_long — format/argument mismatch; should be %lu.
 */
mbuf_outbound_finalize(mbuf_t mbuf, u_long protocol_family, size_t protocol_offset)
	/* nothing to do when no delayed checksum work is pending */
	if ((mbuf->m_pkthdr.csum_flags &
	    (CSUM_DELAY_DATA | CSUM_DELAY_IP | CSUM_TCP_SUM16)) == 0)

	/* Generate the packet in software, client needs it */
	switch (protocol_family) {
		if (mbuf->m_pkthdr.csum_flags & CSUM_TCP_SUM16) {
			/*
			 * If you're wondering where this lovely code comes
			 * from, we're trying to undo what happens in ip_output.
			 * Look for CSUM_TCP_SUM16 in ip_output.
			 */
			u_int16_t first, second;
			mbuf->m_pkthdr.csum_flags &= ~CSUM_TCP_SUM16;
			mbuf->m_pkthdr.csum_flags |= CSUM_TCP;
			first = mbuf->m_pkthdr.csum_data >> 16;
			second = mbuf->m_pkthdr.csum_data & 0xffff;
			mbuf->m_pkthdr.csum_data = first - second;

		if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
			in_delayed_cksum_offset(mbuf, protocol_offset);

		if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
			in_cksum_offset(mbuf, protocol_offset);

		/* checksums are now final; clear the deferred-work flags */
		mbuf->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DELAY_IP);

		/*
		 * Not sure what to do here if anything.
		 * Hardware checksum code looked pretty IPv4 specific.
		 */
		if ((mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_IP)) != 0)
			panic("mbuf_outbound_finalize - CSUM flags set for non-IPv4 packet (%d)!\n", protocol_family);
/*
 * VLAN tag accessors (set / get / clear) — only body fragments are
 * visible in this view.
 * NOTE(review): extraction gaps — the three function signatures,
 * braces and return statements are all missing here.
 */
	/* set: record the tag and mark it present */
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	/* get: fail when no tag has been recorded */
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0)
		return ENXIO; // No vlan tag set

	*vlan = mbuf->m_pkthdr.vlan_tag;

	/* clear: drop both the valid bit and the stored tag */
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;
548 static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags
=
549 MBUF_CSUM_REQ_IP
| MBUF_CSUM_REQ_TCP
| MBUF_CSUM_REQ_UDP
| MBUF_CSUM_REQ_SUM16
;
/*
 * Checksum-request accessors: record (set), read back (get) and
 * clear which checksums the caller wants computed. The low 16 bits
 * of csum_flags carry the request bits; csum_data carries an
 * associated value.
 * NOTE(review): extraction gaps — the return-type lines, remaining
 * parameters (mbuf, value), braces and returns of all three
 * functions are missing from this view.
 */
mbuf_set_csum_requested(
	mbuf_csum_request_flags_t request,
	/* drop bits clients are not allowed to request */
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

mbuf_get_csum_requested(
	mbuf_csum_request_flags_t *request,
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	*value = mbuf->m_pkthdr.csum_data;

mbuf_clear_csum_requested(
	/* keep the high (non-request) bits, zero the request bits */
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;
589 static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags
=
590 MBUF_CSUM_DID_IP
| MBUF_CSUM_IP_GOOD
| MBUF_CSUM_DID_DATA
|
591 MBUF_CSUM_PSEUDO_HDR
| MBUF_CSUM_TCP_SUM16
;
/*
 * Checksum-performed accessors: record (set), read back (get) and
 * clear the checksum results reported for a packet.
 * NOTE(review): extraction gaps — the return-type lines, remaining
 * parameters, braces and returns of all three functions are missing
 * from this view.
 */
mbuf_set_csum_performed(
	mbuf_csum_performed_flags_t performed,
	/* drop bits that are not valid result flags */
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

mbuf_get_csum_performed(
	mbuf_csum_performed_flags_t *performed,
	*performed = mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

mbuf_clear_csum_performed(
	/* keep the high bits, zero the result bits */
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;
/*
 * Registry entry mapping a tag-id string to its numeric id.
 * NOTE(review): extraction gap — the 'id' and 'string' members that
 * the registry code below references are not visible here.
 */
struct mbuf_tag_id_entry {
	SLIST_ENTRY(mbuf_tag_id_entry) next;
638 #define MBUF_TAG_ID_ENTRY_SIZE(__str) \
639 ((size_t)&(((struct mbuf_tag_id_entry*)0)->string[0]) + \
642 #define MTAG_FIRST_ID 1000
/* Next id to hand out; ids below MTAG_FIRST_ID are reserved. */
static u_long mtag_id_next = MTAG_FIRST_ID;
/* Singly-linked list of registered string<->id entries. */
static SLIST_HEAD(,mbuf_tag_id_entry) mtag_id_list = {NULL};
/* Lazily allocated mutex guarding mtag_id_next and mtag_id_list. */
static lck_mtx_t *mtag_id_lock = NULL;
/*
 * Report the [first, last] inclusive range of tag ids handed out so
 * far (last is one less than the next id to be assigned).
 * NOTE(review): extraction gap — the parameter-list lines
 * (u_long *first, u_long *last) and braces are not visible here.
 */
__private_extern__ void
mbuf_tag_id_first_last(
	*first = MTAG_FIRST_ID;
	*last = mtag_id_next - 1;
/*
 * Look up (and, when create != 0, register) the numeric id for a tag
 * name. The registry mutex is allocated lazily; an atomic
 * compare-and-swap resolves the race between concurrent first callers.
 * NOTE(review): extraction gaps — several returns (invalid-args,
 * lookup-miss, allocation-failure paths), the found-entry early exit,
 * the *out_id stores, the mtag_id_next increment and braces are not
 * visible here.
 * NOTE(review): OSCompareAndSwap casts the lock pointer through
 * UInt32 — only correct on a 32-bit kernel; confirm for 64-bit builds.
 */
__private_extern__ errno_t
mbuf_tag_id_find_internal(
	struct mbuf_tag_id_entry *entry = NULL;

	if (string == NULL || out_id == NULL) {

	/* Don't bother allocating the lock if we're only doing a lookup */
	if (create == 0 && mtag_id_lock == NULL)

	/* Allocate lock if necessary */
	if (mtag_id_lock == NULL) {
		lck_grp_attr_t *grp_attrib = NULL;
		lck_attr_t *lck_attrb = NULL;
		lck_grp_t *lck_group = NULL;
		lck_mtx_t *new_lock = NULL;

		grp_attrib = lck_grp_attr_alloc_init();
		lck_grp_attr_setdefault(grp_attrib);
		lck_group = lck_grp_alloc_init("mbuf_tag_allocate_id", grp_attrib);
		lck_grp_attr_free(grp_attrib);
		lck_attrb = lck_attr_alloc_init();
		lck_attr_setdefault(lck_attrb);
		lck_attr_setdebug(lck_attrb);
		new_lock = lck_mtx_alloc_init(lck_group, lck_attrb);
		if (!OSCompareAndSwap((UInt32)0, (UInt32)new_lock, (UInt32*)&mtag_id_lock)) {
			/*
			 * If the atomic swap fails, someone else has already
			 * done this work. We can free the stuff we allocated.
			 */
			lck_mtx_free(new_lock, lck_group);
			lck_grp_free(lck_group);
		lck_attr_free(lck_attrb);

	/* Look for an existing entry */
	lck_mtx_lock(mtag_id_lock);
	SLIST_FOREACH(entry, &mtag_id_list, next) {
		if (strcmp(string, entry->string) == 0) {

	lck_mtx_unlock(mtag_id_lock);

	/* allocate a new entry sized to hold the string (see macro above) */
	entry = kalloc(MBUF_TAG_ID_ENTRY_SIZE(string));
		lck_mtx_unlock(mtag_id_lock);

	/* entry->string was sized by the kalloc above, so strcpy fits */
	strcpy(entry->string, string);
	entry->id = mtag_id_next;
	SLIST_INSERT_HEAD(&mtag_id_list, entry, next);
	lck_mtx_unlock(mtag_id_lock);
/*
 * Public wrapper: resolve (registering on first use, create == 1) the
 * id for a tag name.
 * NOTE(review): extraction gap — the signature's leading lines
 * (return type and 'const char *string' parameter) are not visible.
 */
	mbuf_tag_id_t *out_id)
	return mbuf_tag_id_find_internal(string, (u_long*)out_id, 1);
/*
 * Allocate and attach a tag of (id, type) with 'length' bytes of
 * storage; the tag must not already exist on this mbuf.
 * NOTE(review): extraction gaps — the other signature lines (mbuf,
 * id, length, how, data_p parameters), the EINVAL/already-exists
 * returns, the *data_p store and the success return are not visible
 * here.
 */
	mbuf_tag_type_t type,

	/* Sanity check parameters */
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
	    id >= mtag_id_next || length < 1 || (length & 0xffff0000) != 0 ||

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type, NULL);

	/* Allocate an mtag */
	tag = m_tag_alloc(id, type, length, how);
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
/*
 * Locate an existing tag of (id, type) and return its length and a
 * pointer to its data.
 * NOTE(review): extraction gaps — the other signature lines, the
 * not-found return, the *data_p store and the success return are not
 * visible here.
 */
	mbuf_tag_type_t type,

	/* Sanity check parameters */
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
	    id >= mtag_id_next || length == NULL || data_p == NULL) {

	tag = m_tag_locate(mbuf, id, type, NULL);

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
/*
 * Remove and free the tag of (id, type) from this mbuf, if present.
 * NOTE(review): extraction gaps — the other signature lines, braces
 * and the early returns (invalid args, tag not found) are not
 * visible here.
 */
	mbuf_tag_type_t type)

	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||

	tag = m_tag_locate(mbuf, id, type, NULL);

	m_tag_delete(mbuf, tag);
837 void mbuf_stats(struct mbuf_stat
*stats
)
839 stats
->mbufs
= mbstat
.m_mbufs
;
840 stats
->clusters
= mbstat
.m_clusters
;
841 stats
->clfree
= mbstat
.m_clfree
;
842 stats
->drops
= mbstat
.m_drops
;
843 stats
->wait
= mbstat
.m_wait
;
844 stats
->drain
= mbstat
.m_drain
;
845 __builtin_memcpy(stats
->mtypes
, mbstat
.m_mtypes
, sizeof(stats
->mtypes
));
846 stats
->mcfail
= mbstat
.m_mcfail
;
847 stats
->mpfail
= mbstat
.m_mpfail
;
848 stats
->msize
= mbstat
.m_msize
;
849 stats
->mclbytes
= mbstat
.m_mclbytes
;
850 stats
->minclsize
= mbstat
.m_minclsize
;
851 stats
->mlen
= mbstat
.m_mlen
;
852 stats
->mhlen
= mbstat
.m_mhlen
;
853 stats
->bigclusters
= mbstat
.m_bigclusters
;
854 stats
->bigclfree
= mbstat
.m_bigclfree
;
855 stats
->bigmclbytes
= mbstat
.m_bigmclbytes
;
/*
 * Allocate a packet large enough for 'packetlen' bytes, using at
 * most *maxchunks mbufs/clusters when maxchunks is supplied (and
 * nonzero).
 * NOTE(review): extraction gaps — the return-type line, the local
 * 'm' declaration, the zero-length error path, the over-budget error
 * handling, writing the used chunk count back through maxchunks, the
 * *mbuf store and the returns are not visible here.
 */
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
	unsigned int numpkts = 1;
	/* caller-imposed chunk budget; 0 means no limit */
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {

	m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);

	/* the allocator exceeded the caller's chunk budget */
	if (maxchunks && *maxchunks && numchunks > *maxchunks)
886 * mbuf_copyback differs from m_copyback in a few ways:
887 * 1) mbuf_copyback will allocate clusters for new mbufs we append
888 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
889 * 3) mbuf_copyback reports whether or not the operation succeeded
890 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
905 const char *cp
= data
;
907 if (m
== NULL
|| len
== 0 || data
== NULL
)
910 while (off
> (mlen
= m
->m_len
)) {
913 if (m
->m_next
== 0) {
914 n
= m_getclr(how
, m
->m_type
);
919 n
->m_len
= MIN(MLEN
, len
+ off
);
926 mlen
= MIN(m
->m_len
- off
, len
);
927 if (mlen
< len
&& m
->m_next
== NULL
&& mbuf_trailingspace(m
) > 0) {
928 size_t grow
= MIN(mbuf_trailingspace(m
), len
- mlen
);
932 bcopy(cp
, off
+ (char*)mbuf_data(m
), (unsigned)mlen
);
940 if (m
->m_next
== 0) {
941 n
= m_get(how
, m
->m_type
);
946 if (len
> MINCLSIZE
) {
947 /* cluter allocation failure is okay, we can grow chain */
948 mbuf_mclget(how
, m
->m_type
, &n
);
950 n
->m_len
= MIN(mbuf_maxlen(n
), len
);
957 if ((m_start
->m_flags
& M_PKTHDR
) && (m_start
->m_pkthdr
.len
< totlen
))
958 m_start
->m_pkthdr
.len
= totlen
;