/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
//#include <sys/kpi_interface.h>

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <kern/debug.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <string.h>
void mbuf_tag_id_first_last(u_long *first, u_long *last);
errno_t mbuf_tag_id_find_internal(const char *string, u_long *out_id, int create);
static const mbuf_flags_t mbuf_flags_mask = MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
                MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
                MBUF_LASTFRAG | MBUF_PROMISC;
void* mbuf_data(mbuf_t mbuf)
{
    return mbuf->m_data;
}

void* mbuf_datastart(mbuf_t mbuf)
{
    if (mbuf->m_flags & M_EXT)
        return mbuf->m_ext.ext_buf;
    if (mbuf->m_flags & M_PKTHDR)
        return mbuf->m_pktdat;
    return mbuf->m_dat;
}
errno_t mbuf_setdata(mbuf_t mbuf, void* data, size_t len)
{
    size_t start = (size_t)((char*)mbuf_datastart(mbuf));
    size_t maxlen = mbuf_maxlen(mbuf);

    /* The new data range must stay inside the mbuf's backing storage */
    if ((size_t)data < start || ((size_t)data) + len > start + maxlen)
        return EINVAL;
    mbuf->m_data = data;
    mbuf->m_len = len;

    return 0;
}
errno_t mbuf_align_32(mbuf_t mbuf, size_t len)
{
    /* A shared external cluster cannot be repositioned safely */
    if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf))
        return ENOTSUP;
    mbuf->m_data = mbuf_datastart(mbuf);
    mbuf->m_data += ((mbuf_trailingspace(mbuf) - len) &~ (sizeof(u_int32_t) - 1));

    return 0;
}
addr64_t mbuf_data_to_physical(void* ptr)
{
    return (addr64_t)mcl_to_paddr(ptr);
}
errno_t mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
    /* Must set *mbuf to NULL in failure case */
    *mbuf = m_get(how, type);

    return (*mbuf == NULL) ? ENOMEM : 0;
}
errno_t mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
    /* Must set *mbuf to NULL in failure case */
    *mbuf = m_gethdr(how, type);

    return (*mbuf == NULL) ? ENOMEM : 0;
}
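/*
 * Usage sketch (illustrative only, kept out of the build with #if 0): how a
 * caller might pair mbuf_gethdr with the data/length setters in this file to
 * build a small packet. The helper name and the 64-byte payload are
 * assumptions made for the example, not part of this KPI.
 */
#if 0
static errno_t
example_build_small_packet(mbuf_t *out_pkt)
{
    char    payload[64] = { 0 };    /* hypothetical payload */
    mbuf_t  pkt = NULL;
    errno_t err;

    err = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &pkt);
    if (err != 0)
        return err;     /* pkt is NULL on failure */

    /* A payload this small fits in the mbuf's internal data area. */
    bcopy(payload, mbuf_data(pkt), sizeof(payload));
    mbuf_setlen(pkt, sizeof(payload));
    mbuf_pkthdr_setlen(pkt, sizeof(payload));

    *out_pkt = pkt;
    return 0;
}
#endif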
extern struct mbuf *m_mbigget(struct mbuf *m, int nowait);

errno_t mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
    /* Must set *mbuf to NULL in failure case */
    errno_t error = 0;
    int     created = 0;

    if (mbuf == NULL)
        return EINVAL;
    if (*mbuf == NULL) {
        *mbuf = m_get(how, type);
        if (*mbuf == NULL)
            return ENOMEM;
        created = 1;
    }
    /*
     * At the time this code was written, m_mclget and m_mbigget would always
     * return the same value that was passed in to it.
     */
    if (size == MCLBYTES) {
        *mbuf = m_mclget(*mbuf, how);
    } else if (size == NBPG) {
        *mbuf = m_mbigget(*mbuf, how);
    } else {
        error = EINVAL;
        goto out;
    }
    /* If no cluster could be attached, report failure */
    if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
        error = ENOMEM;
out:
    if (created && error != 0) {
        mbuf_free(*mbuf);
        *mbuf = NULL;
    }
    return error;
}
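/*
 * Usage sketch (illustrative only, kept out of the build): requesting a
 * cluster-backed mbuf when the payload will not fit in the internal data
 * area. The helper name and the 1500-byte length are example assumptions.
 */
#if 0
static errno_t
example_alloc_frame(mbuf_t *out_m)
{
    mbuf_t  m = NULL;
    errno_t err;

    /* Ask for a 2048-byte cluster; on failure m stays NULL. */
    err = mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, MCLBYTES, &m);
    if (err != 0)
        return err;

    mbuf_setlen(m, 1500);
    *out_m = m;
    return 0;
}
#endif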
errno_t mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
    /* Must set *mbuf to NULL in failure case */
    errno_t error = 0;
    int     created = 0;

    if (mbuf == NULL) return EINVAL;
    if (*mbuf == NULL) {
        error = mbuf_get(how, type, mbuf);
        if (error)
            return error;
        created = 1;
    }

    /*
     * At the time this code was written, m_mclget would always
     * return the same value that was passed in to it.
     */
    *mbuf = m_mclget(*mbuf, how);
    if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
        mbuf_free(*mbuf);
        *mbuf = NULL;
    }
    if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
        return ENOMEM;
    return 0;
}
errno_t mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
{
    /* Must set *mbuf to NULL in failure case */
    errno_t error = 0;

    *mbuf = m_getpacket_how(how);

    if (*mbuf == NULL) {
        if (how == MBUF_WAITOK)
            error = ENOMEM;
        else
            error = EWOULDBLOCK;
    }

    return error;
}
mbuf_t mbuf_free(mbuf_t mbuf)
{
    return m_free(mbuf);
}

void mbuf_freem(mbuf_t mbuf)
{
    m_freem(mbuf);
}

int mbuf_freem_list(mbuf_t mbuf)
{
    return m_freem_list(mbuf);
}

size_t mbuf_leadingspace(mbuf_t mbuf)
{
    return m_leadingspace(mbuf);
}

size_t mbuf_trailingspace(mbuf_t mbuf)
{
    return m_trailingspace(mbuf);
}
errno_t mbuf_copym(mbuf_t src, size_t offset, size_t len,
                   mbuf_how_t how, mbuf_t *new_mbuf)
{
    /* Must set *new_mbuf to NULL in failure case */
    *new_mbuf = m_copym(src, offset, len, how);

    return (*new_mbuf == NULL) ? ENOMEM : 0;
}
errno_t mbuf_dup(mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
    /* Must set *new_mbuf to NULL in failure case */
    *new_mbuf = m_dup(src, how);

    return (*new_mbuf == NULL) ? ENOMEM : 0;
}
errno_t mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
    /* Must set *orig to NULL in failure case */
    *orig = m_prepend_2(*orig, len, how);

    return (*orig == NULL) ? ENOMEM : 0;
}
errno_t mbuf_split(mbuf_t src, size_t offset,
                   mbuf_how_t how, mbuf_t *new_mbuf)
{
    /* Must set *new_mbuf to NULL in failure case */
    *new_mbuf = m_split(src, offset, how);

    return (*new_mbuf == NULL) ? ENOMEM : 0;
}
errno_t mbuf_pullup(mbuf_t *mbuf, size_t len)
{
    /* Must set *mbuf to NULL in failure case */
    *mbuf = m_pullup(*mbuf, len);

    return (*mbuf == NULL) ? ENOMEM : 0;
}
errno_t mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
    /* Must set *location to NULL in failure case */
    int new_offset;

    *location = m_pulldown(src, *offset, len, &new_offset);
    *offset = new_offset;

    return (*location == NULL) ? ENOMEM : 0;
}
void mbuf_adj(mbuf_t mbuf, int len)
{
    m_adj(mbuf, len);
}
errno_t mbuf_copydata(mbuf_t m, size_t off, size_t len, void* out_data)
{
    /* Copied m_copydata, added error handling (don't just panic) */
    int count;

    /* Skip to the mbuf that contains the starting offset */
    while (off > 0) {
        if (m == 0)
            return EINVAL;
        if (off < (size_t)m->m_len)
            break;
        off -= m->m_len;
        m = m->m_next;
    }

    /* Copy out len bytes, walking the chain as needed */
    while (len > 0) {
        if (m == 0)
            return EINVAL;
        count = m->m_len - off > len ? len : m->m_len - off;
        bcopy(mtod(m, caddr_t) + off, out_data, count);
        len -= count;
        out_data = ((char*)out_data) + count;
        off = 0;
        m = m->m_next;
    }

    return 0;
}
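/*
 * Usage sketch (illustrative only, kept out of the build): mbuf_copydata
 * walks m_next for the caller, so a header that may straddle mbufs can be
 * copied into a flat buffer in one call. The 20-byte length is an example
 * assumption (e.g. a minimal IPv4 header).
 */
#if 0
static errno_t
example_read_header(mbuf_t m, void *hdr_buf)
{
    /* Copy 20 bytes starting at offset 0, spanning mbufs as needed. */
    return mbuf_copydata(m, 0, 20, hdr_buf);
}
#endif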
int mbuf_mclref(mbuf_t mbuf)
{
    return m_mclref(mbuf);
}

int mbuf_mclunref(mbuf_t mbuf)
{
    return m_mclunref(mbuf);
}

int mbuf_mclhasreference(mbuf_t mbuf)
{
    if ((mbuf->m_flags & M_EXT))
        return m_mclhasreference(mbuf);
    else
        return 0;
}
mbuf_t mbuf_next(mbuf_t mbuf)
{
    return mbuf->m_next;
}

errno_t mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
    if (next && ((next)->m_nextpkt != NULL ||
        (next)->m_type == MT_FREE)) return EINVAL;
    mbuf->m_next = next;

    return 0;
}

mbuf_t mbuf_nextpkt(mbuf_t mbuf)
{
    return mbuf->m_nextpkt;
}

void mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
    mbuf->m_nextpkt = nextpkt;
}
size_t mbuf_len(mbuf_t mbuf)
{
    return mbuf->m_len;
}

void mbuf_setlen(mbuf_t mbuf, size_t len)
{
    mbuf->m_len = len;
}

size_t mbuf_maxlen(mbuf_t mbuf)
{
    if (mbuf->m_flags & M_EXT)
        return mbuf->m_ext.ext_size;
    return &mbuf->m_dat[MLEN] - ((char*)mbuf_datastart(mbuf));
}
mbuf_type_t mbuf_type(mbuf_t mbuf)
{
    return mbuf->m_type;
}

errno_t mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
    if (new_type == MBUF_TYPE_FREE) return EINVAL;

    m_mchtype(mbuf, new_type);

    return 0;
}
mbuf_flags_t mbuf_flags(mbuf_t mbuf)
{
    return mbuf->m_flags & mbuf_flags_mask;
}

errno_t mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
    if ((flags & ~mbuf_flags_mask) != 0) return EINVAL;
    mbuf->m_flags = flags |
        (mbuf->m_flags & ~mbuf_flags_mask);

    return 0;
}

errno_t mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
    if (((flags | mask) & ~mbuf_flags_mask) != 0) return EINVAL;

    mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);

    return 0;
}
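/*
 * Usage sketch (illustrative only, kept out of the build): mbuf_setflags_mask
 * updates only the bits named in the mask, which is the safe way to flip
 * individual exported flags without disturbing the others.
 */
#if 0
static void
example_mark_broadcast(mbuf_t m)
{
    /* Set MBUF_BCAST and clear MBUF_MCAST; all other flags are preserved. */
    mbuf_setflags_mask(m, MBUF_BCAST, MBUF_BCAST | MBUF_MCAST);
}
#endif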
errno_t mbuf_copy_pkthdr(mbuf_t dest, mbuf_t src)
{
    if (((src)->m_flags & M_PKTHDR) == 0)
        return EINVAL;

    m_copy_pkthdr(dest, src);

    return 0;
}
size_t mbuf_pkthdr_len(mbuf_t mbuf)
{
    return mbuf->m_pkthdr.len;
}

void mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
    mbuf->m_pkthdr.len = len;
}

ifnet_t mbuf_pkthdr_rcvif(mbuf_t mbuf)
{
    // If we reference count ifnets, we should take a reference here before returning
    return mbuf->m_pkthdr.rcvif;
}

errno_t mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
    /* May want to walk ifnet list to determine if interface is valid */
    mbuf->m_pkthdr.rcvif = (struct ifnet*)ifnet;
    return 0;
}

void* mbuf_pkthdr_header(mbuf_t mbuf)
{
    return mbuf->m_pkthdr.header;
}

void mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
    mbuf->m_pkthdr.header = (void*)header;
}
errno_t mbuf_aux_add(mbuf_t mbuf, int family, mbuf_type_t type, mbuf_t *aux_mbuf)
{
    *aux_mbuf = m_aux_add(mbuf, family, type);
    return (*aux_mbuf == NULL) ? ENOMEM : 0;
}

mbuf_t mbuf_aux_find(mbuf_t mbuf, int family, mbuf_type_t type)
{
    return m_aux_find(mbuf, family, type);
}

void mbuf_aux_delete(mbuf_t mbuf, mbuf_t aux)
{
    m_aux_delete(mbuf, aux);
}
void mbuf_inbound_modified(mbuf_t mbuf)
{
    /* Invalidate hardware generated checksum flags */
    mbuf->m_pkthdr.csum_flags = 0;
}
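/*
 * Usage sketch (illustrative only, kept out of the build): a filter that
 * rewrites an inbound packet should call mbuf_inbound_modified so stale
 * hardware checksum results are not trusted further up the stack.
 */
#if 0
static void
example_filter_rewrite(mbuf_t m)
{
    /* ... modify packet contents here ... */

    /* Discard any hardware-generated checksum state for this packet. */
    mbuf_inbound_modified(m);
}
#endif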
extern void in_cksum_offset(struct mbuf *m, size_t ip_offset);
extern void in_delayed_cksum_offset(struct mbuf *m, int ip_offset);

void
mbuf_outbound_finalize(mbuf_t mbuf, u_long protocol_family, size_t protocol_offset)
{
    if ((mbuf->m_pkthdr.csum_flags &
         (CSUM_DELAY_DATA | CSUM_DELAY_IP | CSUM_TCP_SUM16)) == 0)
        return;

    /* Generate the packet in software, client needs it */
    switch (protocol_family) {
        case PF_INET:
            if (mbuf->m_pkthdr.csum_flags & CSUM_TCP_SUM16) {
                /*
                 * If you're wondering where this lovely code comes
                 * from, we're trying to undo what happens in ip_output.
                 * Look for CSUM_TCP_SUM16 in ip_output.
                 */
                u_int16_t first, second;
                mbuf->m_pkthdr.csum_flags &= ~CSUM_TCP_SUM16;
                mbuf->m_pkthdr.csum_flags |= CSUM_TCP;
                first = mbuf->m_pkthdr.csum_data >> 16;
                second = mbuf->m_pkthdr.csum_data & 0xffff;
                mbuf->m_pkthdr.csum_data = first - second;
            }
            if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
                in_delayed_cksum_offset(mbuf, protocol_offset);
            }

            if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
                in_cksum_offset(mbuf, protocol_offset);
            }

            mbuf->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DELAY_IP);
            break;

        default:
            /*
             * Not sure what to do here if anything.
             * Hardware checksum code looked pretty IPv4 specific.
             */
            if ((mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_IP)) != 0)
                panic("mbuf_outbound_finalize - CSUM flags set for non-IPv4 packet (%d)!\n", protocol_family);
    }
}
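/*
 * Usage sketch (illustrative only, kept out of the build): a driver without
 * checksum offload asks for delayed checksums to be computed in software
 * before the frame is handed to hardware. The 14-byte Ethernet header offset
 * is an assumption made for the example.
 */
#if 0
static void
example_driver_output(mbuf_t m)
{
    /* Finalize any delayed IP/TCP/UDP checksums; IP header starts at offset 14. */
    mbuf_outbound_finalize(m, PF_INET, 14);

    /* ... enqueue the mbuf to the hardware transmit ring here ... */
}
#endif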
errno_t
mbuf_set_vlan_tag(
    mbuf_t mbuf,
    u_int16_t vlan)
{
    mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
    mbuf->m_pkthdr.vlan_tag = vlan;

    return 0;
}

errno_t
mbuf_get_vlan_tag(
    mbuf_t mbuf,
    u_int16_t *vlan)
{
    if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0)
        return ENXIO; // No vlan tag set

    *vlan = mbuf->m_pkthdr.vlan_tag;

    return 0;
}

errno_t
mbuf_clear_vlan_tag(
    mbuf_t mbuf)
{
    mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
    mbuf->m_pkthdr.vlan_tag = 0;

    return 0;
}
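/*
 * Usage sketch (illustrative only, kept out of the build): how a driver might
 * consume the VLAN tag carried in the packet header on transmit.
 */
#if 0
static void
example_driver_vlan(mbuf_t m)
{
    u_int16_t tag;

    if (mbuf_get_vlan_tag(m, &tag) == 0) {
        /* A tag was set; program it into the transmit descriptor. */
    } else {
        /* ENXIO: no VLAN tag on this packet, send it untagged. */
    }
}
#endif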
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
    MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP | MBUF_CSUM_REQ_SUM16;

errno_t
mbuf_set_csum_requested(
    mbuf_t mbuf,
    mbuf_csum_request_flags_t request,
    u_int32_t value)
{
    request &= mbuf_valid_csum_request_flags;
    mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
    mbuf->m_pkthdr.csum_data = value;

    return 0;
}

errno_t
mbuf_get_csum_requested(
    mbuf_t mbuf,
    mbuf_csum_request_flags_t *request,
    u_int32_t *value)
{
    *request = mbuf->m_pkthdr.csum_flags;
    *request &= mbuf_valid_csum_request_flags;
    if (value != NULL) {
        *value = mbuf->m_pkthdr.csum_data;
    }

    return 0;
}

errno_t
mbuf_clear_csum_requested(
    mbuf_t mbuf)
{
    mbuf->m_pkthdr.csum_flags &= 0xffff0000;
    mbuf->m_pkthdr.csum_data = 0;

    return 0;
}

static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
    MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_TCP_SUM16;

errno_t
mbuf_set_csum_performed(
    mbuf_t mbuf,
    mbuf_csum_performed_flags_t performed,
    u_int32_t value)
{
    performed &= mbuf_valid_csum_performed_flags;
    mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
    mbuf->m_pkthdr.csum_data = value;

    return 0;
}

errno_t
mbuf_get_csum_performed(
    mbuf_t mbuf,
    mbuf_csum_performed_flags_t *performed,
    u_int32_t *value)
{
    *performed = mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
    *value = mbuf->m_pkthdr.csum_data;

    return 0;
}

errno_t
mbuf_clear_csum_performed(
    mbuf_t mbuf)
{
    mbuf->m_pkthdr.csum_flags &= 0xffff0000;
    mbuf->m_pkthdr.csum_data = 0;

    return 0;
}
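/*
 * Usage sketch (illustrative only, kept out of the build): a receive path
 * reporting that hardware verified the IP header checksum and computed the
 * full TCP/UDP checksum including the pseudo header (0xffff means valid).
 */
#if 0
static void
example_driver_input_csum(mbuf_t m)
{
    mbuf_set_csum_performed(m,
        MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD |
        MBUF_CSUM_DID_DATA | MBUF_CSUM_PSEUDO_HDR,
        0xffff);
}
#endif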
struct mbuf_tag_id_entry {
    SLIST_ENTRY(mbuf_tag_id_entry)  next;
    u_long                          id;
    char                            string[];
};

#define MBUF_TAG_ID_ENTRY_SIZE(__str) \
    ((size_t)&(((struct mbuf_tag_id_entry*)0)->string[0]) + \
     strlen(__str) + 1)

#define MTAG_FIRST_ID 1000
static u_long mtag_id_next = MTAG_FIRST_ID;
static SLIST_HEAD(,mbuf_tag_id_entry) mtag_id_list = {NULL};
static lck_mtx_t *mtag_id_lock = NULL;
__private_extern__ void
mbuf_tag_id_first_last(
    u_long  *first,
    u_long  *last)
{
    *first = MTAG_FIRST_ID;
    *last = mtag_id_next - 1;
}
__private_extern__ errno_t
mbuf_tag_id_find_internal(
    const char  *string,
    u_long      *out_id,
    int         create)
{
    struct mbuf_tag_id_entry *entry = NULL;

    if (string == NULL || out_id == NULL) {
        return EINVAL;
    }
    *out_id = 0;

    /* Don't bother allocating the lock if we're only doing a lookup */
    if (create == 0 && mtag_id_lock == NULL)
        return ENOENT;

    /* Allocate lock if necessary */
    if (mtag_id_lock == NULL) {
        lck_grp_attr_t  *grp_attrib = NULL;
        lck_attr_t      *lck_attrb = NULL;
        lck_grp_t       *lck_group = NULL;
        lck_mtx_t       *new_lock = NULL;

        grp_attrib = lck_grp_attr_alloc_init();
        lck_grp_attr_setdefault(grp_attrib);
        lck_group = lck_grp_alloc_init("mbuf_tag_allocate_id", grp_attrib);
        lck_grp_attr_free(grp_attrib);
        lck_attrb = lck_attr_alloc_init();
        lck_attr_setdefault(lck_attrb);
        lck_attr_setdebug(lck_attrb);
        new_lock = lck_mtx_alloc_init(lck_group, lck_attrb);
        if (!OSCompareAndSwap((UInt32)0, (UInt32)new_lock, (UInt32*)&mtag_id_lock)) {
            /*
             * If the atomic swap fails, someone else has already
             * done this work. We can free the stuff we allocated.
             */
            lck_mtx_free(new_lock, lck_group);
            lck_grp_free(lck_group);
        }
        lck_attr_free(lck_attrb);
    }

    /* Look for an existing entry */
    lck_mtx_lock(mtag_id_lock);
    SLIST_FOREACH(entry, &mtag_id_list, next) {
        if (strcmp(string, entry->string) == 0) {
            break;
        }
    }

    /* If there was no matching entry, allocate one (or give up for lookups) */
    if (entry == NULL) {
        if (create == 0) {
            lck_mtx_unlock(mtag_id_lock);
            return ENOENT;
        }

        entry = kalloc(MBUF_TAG_ID_ENTRY_SIZE(string));
        if (entry == NULL) {
            lck_mtx_unlock(mtag_id_lock);
            return ENOMEM;
        }

        strcpy(entry->string, string);
        entry->id = mtag_id_next;
        mtag_id_next++;
        SLIST_INSERT_HEAD(&mtag_id_list, entry, next);
    }
    *out_id = entry->id;

    lck_mtx_unlock(mtag_id_lock);

    return 0;
}
errno_t
mbuf_tag_id_find(
    const char      *string,
    mbuf_tag_id_t   *out_id)
{
    return mbuf_tag_id_find_internal(string, (u_long*)out_id, 1);
}
errno_t
mbuf_tag_allocate(
    mbuf_t          mbuf,
    mbuf_tag_id_t   id,
    mbuf_tag_type_t type,
    size_t          length,
    mbuf_how_t      how,
    void**          data_p)
{
    struct m_tag *tag;

    if (data_p != NULL)
        *data_p = NULL;

    /* Sanity check parameters */
    if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
        id >= mtag_id_next || length < 1 || (length & 0xffff0000) != 0 ||
        data_p == NULL) {
        return EINVAL;
    }

    /* Make sure this mtag hasn't already been allocated */
    tag = m_tag_locate(mbuf, id, type, NULL);
    if (tag != NULL) {
        return EEXIST;
    }

    /* Allocate an mtag */
    tag = m_tag_alloc(id, type, length, how);
    if (tag == NULL) {
        return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
    }

    /* Attach the mtag and set *data_p */
    m_tag_prepend(mbuf, tag);
    *data_p = tag + 1;

    return 0;
}
errno_t
mbuf_tag_find(
    mbuf_t          mbuf,
    mbuf_tag_id_t   id,
    mbuf_tag_type_t type,
    size_t*         length,
    void**          data_p)
{
    struct m_tag *tag;

    /* Sanity check parameters */
    if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
        id >= mtag_id_next || length == NULL || data_p == NULL) {
        return EINVAL;
    }

    /* Locate the mtag */
    tag = m_tag_locate(mbuf, id, type, NULL);
    if (tag == NULL) {
        return ENOENT;
    }

    /* Copy out the pointer to the data and the length value */
    *length = tag->m_tag_len;
    *data_p = tag + 1;

    return 0;
}
void
mbuf_tag_free(
    mbuf_t          mbuf,
    mbuf_tag_id_t   id,
    mbuf_tag_type_t type)
{
    struct m_tag *tag;

    if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
        id >= mtag_id_next)
        return;

    tag = m_tag_locate(mbuf, id, type, NULL);
    if (tag == NULL) {
        return;
    }

    m_tag_delete(mbuf, tag);
}
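/*
 * Usage sketch (illustrative only, kept out of the build): the intended tag
 * workflow for a kext: resolve a module-unique id once with mbuf_tag_id_find,
 * attach per-packet data with mbuf_tag_allocate, and look it up later with
 * mbuf_tag_find. The reverse-DNS string and tag type value are assumptions
 * made for the example.
 */
#if 0
#define EXAMPLE_TAG_TYPE    1

static errno_t
example_tag_packet(mbuf_t m, u_int32_t marker)
{
    mbuf_tag_id_t   id;
    void            *data;
    errno_t         err;

    err = mbuf_tag_id_find("com.example.filter", &id);
    if (err != 0)
        return err;

    err = mbuf_tag_allocate(m, id, EXAMPLE_TAG_TYPE, sizeof(marker),
        MBUF_WAITOK, &data);
    if (err == 0)
        *(u_int32_t*)data = marker;
    return err;
}

static int
example_packet_is_tagged(mbuf_t m)
{
    mbuf_tag_id_t   id;
    size_t          length;
    void            *data;

    if (mbuf_tag_id_find("com.example.filter", &id) != 0)
        return 0;
    return mbuf_tag_find(m, id, EXAMPLE_TAG_TYPE, &length, &data) == 0;
}
#endif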
void mbuf_stats(struct mbuf_stat *stats)
{
    stats->mbufs = mbstat.m_mbufs;
    stats->clusters = mbstat.m_clusters;
    stats->clfree = mbstat.m_clfree;
    stats->drops = mbstat.m_drops;
    stats->wait = mbstat.m_wait;
    stats->drain = mbstat.m_drain;
    __builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
    stats->mcfail = mbstat.m_mcfail;
    stats->mpfail = mbstat.m_mpfail;
    stats->msize = mbstat.m_msize;
    stats->mclbytes = mbstat.m_mclbytes;
    stats->minclsize = mbstat.m_minclsize;
    stats->mlen = mbstat.m_mlen;
    stats->mhlen = mbstat.m_mhlen;
    stats->bigclusters = mbstat.m_bigclusters;
    stats->bigclfree = mbstat.m_bigclfree;
    stats->bigmclbytes = mbstat.m_bigmclbytes;
}
errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
{
    errno_t error;
    struct mbuf *m;
    unsigned int numpkts = 1;
    unsigned int numchunks = maxchunks ? *maxchunks : 0;

    if (packetlen == 0) {
        error = EINVAL;
        goto out;
    }
    m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);
    if (m == 0) {
        if (maxchunks && *maxchunks && numchunks > *maxchunks)
            error = ENOBUFS;
        else
            error = ENOMEM;
    } else {
        error = 0;
        *mbuf = m;
    }
out:
    return error;
}
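/*
 * Usage sketch (illustrative only, kept out of the build): allocating a
 * packet chain sized for a payload with mbuf_allocpacket and filling it with
 * mbuf_copyback (defined below). The helper name and payload handling are
 * assumptions made for the example.
 */
#if 0
static errno_t
example_build_payload(const void *payload, size_t payload_len, mbuf_t *out_m)
{
    mbuf_t  m = NULL;
    errno_t err;

    /* NULL maxchunks: accept however many mbufs/clusters are needed. */
    err = mbuf_allocpacket(MBUF_WAITOK, payload_len, NULL, &m);
    if (err != 0)
        return err;

    err = mbuf_copyback(m, 0, payload_len, payload, MBUF_WAITOK);
    if (err != 0) {
        mbuf_freem(m);
        return err;
    }

    *out_m = m;
    return 0;
}
#endif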
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 */
errno_t
mbuf_copyback(
    mbuf_t      m,
    size_t      off,
    size_t      len,
    const void  *data,
    mbuf_how_t  how)
{
    size_t      mlen;
    mbuf_t      m_start = m;
    mbuf_t      n;
    int         totlen = 0;
    errno_t     result = 0;
    const char  *cp = data;

    if (m == NULL || len == 0 || data == NULL)
        return EINVAL;

    /* Walk to the mbuf containing the starting offset, growing the chain as needed */
    while (off > (mlen = m->m_len)) {
        off -= mlen;
        totlen += mlen;
        if (m->m_next == 0) {
            n = m_getclr(how, m->m_type);
            if (n == 0) {
                result = ENOBUFS;
                goto done;
            }
            n->m_len = MIN(MLEN, len + off);
            m->m_next = n;
        }
        m = m->m_next;
    }

    /* Copy the data, extending the last mbuf or appending new ones as needed */
    while (len > 0) {
        mlen = MIN(m->m_len - off, len);
        if (mlen < len && m->m_next == NULL && mbuf_trailingspace(m) > 0) {
            size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
            mlen += grow;
            m->m_len += grow;
        }
        bcopy(cp, off + (char*)mbuf_data(m), (unsigned)mlen);
        cp += mlen;
        len -= mlen;
        mlen += off;
        off = 0;
        totlen += mlen;
        if (len == 0)
            break;
        if (m->m_next == 0) {
            n = m_get(how, m->m_type);
            if (n == NULL) {
                result = ENOBUFS;
                goto done;
            }
            if (len > MINCLSIZE) {
                /* cluster allocation failure is okay, we can grow the chain */
                mbuf_mclget(how, m->m_type, &n);
            }
            n->m_len = MIN(mbuf_maxlen(n), len);
            m->m_next = n;
        }
        m = m->m_next;
    }

done:
    if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen))
        m_start->m_pkthdr.len = totlen;