/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
/* HISTORY
 *	10/15/97 Annette DeSchon (deschon@apple.com)
 *		Fixed bug in which all cluster mbufs were broken up
 *		into regular mbufs: Some clusters are now reserved.
 *		When a cluster is needed, regular mbufs are no longer
 *		used.  (Radar 1683621)
 *	20-May-95 Mac Gillon (mgillon) at NeXT
 *		New version based on 4.4
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/domain.h>

#include <kern/queue.h>
#include <kern/kern_types.h>
#include <kern/sched_prim.h>

#include <IOKit/IOMapper.h>
extern vm_offset_t kmem_mb_alloc(vm_map_t, int);
extern boolean_t PE_parse_boot_arg(const char *, void *);
#define _MCLREF(p)	(++mclrefcnt[mtocl(p)])
#define _MCLUNREF(p)	(--mclrefcnt[mtocl(p)] == 0)
#define _M_CLEAR_PKTHDR(mbuf_ptr)	(mbuf_ptr)->m_pkthdr.rcvif = NULL; \
					(mbuf_ptr)->m_pkthdr.len = 0; \
					(mbuf_ptr)->m_pkthdr.header = NULL; \
					(mbuf_ptr)->m_pkthdr.csum_flags = 0; \
					(mbuf_ptr)->m_pkthdr.csum_data = 0; \
					(mbuf_ptr)->m_pkthdr.aux = (struct mbuf *)NULL; \
					(mbuf_ptr)->m_pkthdr.vlan_tag = 0; \
					(mbuf_ptr)->m_pkthdr.socket_id = 0; \
					SLIST_INIT(&(mbuf_ptr)->m_pkthdr.tags);
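/*
 * Illustrative note (not part of this file): mclrefcnt[] holds one
 * reference count per mapped cluster, and _MCLREF/_MCLUNREF are the raw
 * operations behind m_mclref()/m_mclunref() below.  A cluster may only
 * go back on a free list when _MCLUNREF() returns true, e.g.:
 *
 *	_MCLREF(p);			// take an extra reference
 *	...
 *	if (_MCLUNREF(p)) {
 *		// last reference dropped; cluster may be freed
 *	}
 *
 * Callers must hold the mbuf lock, since the counts are globally shared.
 */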
/* kernel translator */
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
lck_mtx_t		*mbuf_mlock;
lck_grp_t		*mbuf_mlock_grp;
lck_grp_attr_t		*mbuf_mlock_grp_attr;
lck_attr_t		*mbuf_mlock_attr;
extern lck_mtx_t	*domain_proto_mtx;
struct mbuf	*mfree;			/* mbuf free list */
struct mbuf	*mfreelater;		/* mbuf deallocation list */
extern vm_map_t	mb_map;			/* special map */
int		m_want;			/* sleepers on mbufs */
short		*mclrefcnt;		/* mapped cluster reference counts */
int		*mcl_paddr;		/* physical address of each cluster page */
static ppnum_t	mcl_paddr_base;		/* Handle returned by IOMapper::iovmAlloc() */
union mcluster	*mclfree;		/* mapped cluster free list */
union mbigcluster *mbigfree;		/* mapped big cluster free list */
int		max_linkhdr;		/* largest link-level header */
int		max_protohdr;		/* largest protocol header */
int		max_hdr;		/* largest link+protocol header */
int		max_datalen;		/* MHLEN - max_hdr */
struct mbstat	mbstat;			/* statistics */
union mcluster	*mbutl;			/* first mapped cluster address */
union mcluster	*embutl;		/* ending virtual address of mclusters */

static int	nclpp;			/* # clusters per physical page */
static int m_howmany(int, size_t);
void m_reclaim(void);
static int m_clalloc(const int, const int, const size_t, int);

#define MF_NOWAIT	0x1

/* The initial number of mbuf clusters to allocate. */
#define MINCL	max(16, 2)
static int mbuf_expand_thread_wakeup = 0;
static int mbuf_expand_mcl = 0;
static int mbuf_expand_big = 0;
static int mbuf_expand_thread_initialized = 0;

static void mbuf_expand_thread_init(void);
static void mbuf_expand_thread(void);
static int m_expand(int);
static caddr_t m_bigalloc(int);
static void m_bigfree(caddr_t, u_int, caddr_t);
__private_extern__ struct mbuf *m_mbigget(struct mbuf *, int);
static void m_range_check(void *addr);
#if 1
static int mfree_munge = 0;
#if 0
#define _MFREE_MUNGE(m) {						\
	if (mfree_munge) {						\
		int i;							\
		vm_offset_t *element = (vm_offset_t *)(m);		\
		for (i = 0;						\
		     i < sizeof(struct mbuf)/sizeof(vm_offset_t);	\
		     i++)						\
			(element)[i] = 0xdeadbeef;			\
	}								\
}
#else
void
munge_mbuf(struct mbuf *m)
{
	int i;
	vm_offset_t *element = (vm_offset_t *)(m);

	for (i = 0;
	     i < sizeof(struct mbuf)/sizeof(vm_offset_t);
	     i++)
		(element)[i] = 0xdeadbeef;
}

#define _MFREE_MUNGE(m) {						\
	if (mfree_munge)						\
		munge_mbuf(m);						\
}
#endif
#else
#define _MFREE_MUNGE(m)
#endif
#define _MINTGET(m, type) {					\
	MBUF_LOCK();						\
	if (((m) = mfree) != 0) {				\
		MCHECK(m);					\
		++mclrefcnt[mtocl(m)];				\
		mbstat.m_mtypes[MT_FREE]--;			\
		mbstat.m_mtypes[(type)]++;			\
		mfree = (m)->m_next;				\
	}							\
	MBUF_UNLOCK();						\
}
static void
m_range_check(void *addr)
{
	if (addr && (addr < (void *)mbutl || addr >= (void *)embutl))
		panic("mbuf address out of range 0x%x", addr);
}
__private_extern__ void
mbinit(void)
{
	int m;
	int initmcl = 32;
	int mcl_pages;

	nclpp = round_page_32(MCLBYTES) / MCLBYTES;	/* see mbufgc() */
	if (nclpp < 1) nclpp = 1;

	mbuf_mlock_grp_attr = lck_grp_attr_alloc_init();
	mbuf_mlock_grp = lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr);
	mbuf_mlock_attr = lck_attr_alloc_init();
	mbuf_mlock = lck_mtx_alloc_init(mbuf_mlock_grp, mbuf_mlock_attr);

	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_bigmclbytes = NBPG;

	if (nmbclusters == 0)
		nmbclusters = NMBCLUSTERS;
	MALLOC(mclrefcnt, short *, nmbclusters * sizeof (short),
	    M_TEMP, M_WAITOK);
	for (m = 0; m < nmbclusters; m++)
		mclrefcnt[m] = -1;

	/* Calculate the number of pages assigned to the cluster pool */
	mcl_pages = nmbclusters / (NBPG / CLBYTES);
	MALLOC(mcl_paddr, int *, mcl_pages * sizeof(int), M_TEMP, M_WAITOK);

	/* Register with the I/O Bus mapper */
	mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
	bzero((char *)mcl_paddr, mcl_pages * sizeof(int));

	embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES));

	PE_parse_boot_arg("initmcl", &initmcl);

	if (m_clalloc(max(NBPG / CLBYTES, 1) * initmcl, M_WAIT, MCLBYTES, 0) == 0)
		goto bad;
	MBUF_UNLOCK();

	(void) kernel_thread(kernel_task, mbuf_expand_thread_init);
	return;
bad:
	panic("mbinit");
}
/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Take the mbuf lock (if not already locked) and do not release it.
 */
static int
m_clalloc(
	const int num,
	const int nowait,
	const size_t bufsize,
	int locked)
{
	int i;
	vm_size_t size = 0;
	int numpages = 0;
	vm_offset_t page = 0;

	if (locked == 0)
		MBUF_LOCK();
	/*
	 * Honor the caller's wish to block or not block.
	 * We have a way to grow the pool asynchronously,
	 * by kicking the dlil_input_thread.
	 */
	i = m_howmany(num, bufsize);
	if (i == 0 || nowait == M_DONTWAIT)
		goto out;

	MBUF_UNLOCK();
	size = round_page_32(i * bufsize);
	page = kmem_mb_alloc(mb_map, size);

	if (page == 0) {
		size = NBPG;	/* Try for 1 if failed */
		page = kmem_mb_alloc(mb_map, size);
	}
	MBUF_LOCK();

	if (page) {
		numpages = size / NBPG;
		for (i = 0; i < numpages; i++, page += NBPG) {
			if (((int)page & PGOFSET) == 0) {
				ppnum_t offset = ((char *)page - (char *)mbutl) / NBPG;
				ppnum_t new_page = pmap_find_phys(kernel_pmap, (vm_address_t)page);

				/*
				 * In the case of no mapper being available
				 * the following code nops and returns the
				 * input page; if there is a mapper, the
				 * appropriate I/O page is returned.
				 */
				new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
				mcl_paddr[offset] = new_page << 12;
			}
			if (bufsize == MCLBYTES) {
				union mcluster *mcl = (union mcluster *)page;

				if (++mclrefcnt[mtocl(mcl)] != 0)
					panic("m_clalloc already there");
				mcl->mcl_next = mclfree;
				mclfree = mcl++;
				if (++mclrefcnt[mtocl(mcl)] != 0)
					panic("m_clalloc already there");
				mcl->mcl_next = mclfree;
				mclfree = mcl;
			} else {
				union mbigcluster *mbc = (union mbigcluster *)page;

				if (++mclrefcnt[mtocl(mbc)] != 0)
					panic("m_clalloc already there");
				if (++mclrefcnt[mtocl(mbc) + 1] != 0)
					panic("m_clalloc already there");

				mbc->mbc_next = mbigfree;
				mbigfree = mbc;
			}
		}
		if (bufsize == MCLBYTES) {
			int numcl = numpages << 1;
			mbstat.m_clfree += numcl;
			mbstat.m_clusters += numcl;
			return (numcl);
		} else {
			mbstat.m_bigclfree += numpages;
			mbstat.m_bigclusters += numpages;
			return (numpages);
		}
	}
out:
	/*
	 * When non-blocking we kick a thread if we have to grow the
	 * pool or if the number of free clusters is less than requested.
	 */
	if (bufsize == MCLBYTES) {
		if (i > 0) {
			/* Remember total number of clusters needed at this time */
			i += mbstat.m_clusters;
			if (i > mbuf_expand_mcl) {
				mbuf_expand_mcl = i;
				if (mbuf_expand_thread_initialized)
					wakeup((caddr_t)&mbuf_expand_thread_wakeup);
			}
		}
		if (mbstat.m_clfree >= num)
			return 1;
	} else {
		if (i > 0) {
			/* Remember total number of 4KB clusters needed at this time */
			i += mbstat.m_bigclusters;
			if (i > mbuf_expand_big) {
				mbuf_expand_big = i;
				if (mbuf_expand_thread_initialized)
					wakeup((caddr_t)&mbuf_expand_thread_wakeup);
			}
		}
		if (mbstat.m_bigclfree >= num)
			return 1;
	}
	return 0;
}
/*
 * Add more free mbufs by cutting up a cluster.
 */
static int
m_expand(int canwait)
{
	caddr_t mcl;

	if (mbstat.m_clfree < (mbstat.m_clusters >> 4)) {
		/*
		 * 1/16th of the total number of cluster mbufs allocated is
		 * reserved for large packets.  The number reserved must
		 * always be < 1/2, or future allocation will be prevented.
		 */
		(void)m_clalloc(1, canwait, MCLBYTES, 0);
		MBUF_UNLOCK();
		if (mbstat.m_clfree < (mbstat.m_clusters >> 4))
			return 0;
	}

	MCLALLOC(mcl, canwait);
	if (mcl) {
		struct mbuf *m = (struct mbuf *)mcl;
		int i = NMBPCL;

		MBUF_LOCK();
		mbstat.m_mtypes[MT_FREE] += i;
		mbstat.m_mbufs += i;
		while (i--) {
			_MFREE_MUNGE(m);
			m->m_type = MT_FREE;
			m->m_next = mfree;
			mfree = m++;
		}
		i = m_want;
		m_want = 0;
		MBUF_UNLOCK();
		if (i) wakeup((caddr_t)&mfree);
		return 1;
	}
	return 0;
}
/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(
	int canwait,
	int type)
{
	struct mbuf *m = 0;
	int wait = 0;

	for (;;) {
		(void) m_expand(canwait);
		_MINTGET(m, type);
		if (m) {
			(m)->m_next = (m)->m_nextpkt = 0;
			(m)->m_type = (type);
			(m)->m_data = (m)->m_dat;
			(m)->m_flags = 0;
			(m)->m_len = 0;
		}
		if (m || canwait == M_DONTWAIT)
			break;
		MBUF_LOCK();
		wait = m_want++;
		mbuf_expand_mcl++;
		if (wait == 0)
			mbstat.m_drain++;
		else
			mbstat.m_wait++;
		MBUF_UNLOCK();

		if (mbuf_expand_thread_initialized)
			wakeup((caddr_t)&mbuf_expand_thread_wakeup);

		if (wait == 0) {
			m_reclaim();
		} else {
			struct timespec ts;
			ts.tv_sec = 1;
			ts.tv_nsec = 0;
			(void) msleep((caddr_t)&mfree, 0, (PZERO-1) | PDROP, "m_retry", &ts);
		}
	}
	if (m == 0)
		mbstat.m_drops++;
	return (m);
}
/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(
	int canwait,
	int type)
{
	struct mbuf *m;

	if ((m = m_retry(canwait, type))) {
		m->m_next = m->m_nextpkt = 0;
		m->m_flags |= M_PKTHDR;
		m->m_data = m->m_pktdat;
		_M_CLEAR_PKTHDR(m);
	}
	return (m);
}

void
m_reclaim(void)
{
	do_reclaim = 1;	/* drain is performed in pfslowtimo(), to avoid deadlocks */
	mbstat.m_drain++;
}
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(
	int nowait,
	int type)
{
	struct mbuf *m;

	m_range_check(mfree);
	m_range_check(mclfree);
	m_range_check(mbigfree);

	_MINTGET(m, type);
	if (m) {
		m->m_next = m->m_nextpkt = 0;
		m->m_type = type;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		m->m_len = 0;
	} else
		(m) = m_retry(nowait, type);

	m_range_check(mfree);
	m_range_check(mclfree);
	m_range_check(mbigfree);

	return (m);
}

struct mbuf *
m_gethdr(
	int nowait,
	int type)
{
	struct mbuf *m;

	m_range_check(mfree);
	m_range_check(mclfree);
	m_range_check(mbigfree);

	_MINTGET(m, type);
	if (m) {
		m->m_next = m->m_nextpkt = 0;
		m->m_type = type;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_len = 0;
		_M_CLEAR_PKTHDR(m)
	} else
		m = m_retryhdr(nowait, type);

	m_range_check(mfree);
	m_range_check(mclfree);
	m_range_check(mbigfree);

	return (m);
}
struct mbuf *
m_getclr(
	int nowait,
	int type)
{
	struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}
struct mbuf *
m_free(
	struct mbuf *m)
{
	struct mbuf *n = m->m_next;
	int i;

	m_range_check(m);
	m_range_check(mfree);
	m_range_check(mclfree);

	if (m->m_type == MT_FREE)
		panic("freeing free mbuf");

	/* Free the aux data if there is any */
	if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) {
		m_freem(m->m_pkthdr.aux);
	}
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	MBUF_LOCK();
	if ((m->m_flags & M_EXT)) {
		if (MCLHASREFERENCE(m)) {
			remque((queue_t)&m->m_ext.ext_refs);
		} else if (m->m_ext.ext_free == NULL) {
			union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;

			m_range_check(mcl);

			if (_MCLUNREF(mcl)) {
				mcl->mcl_next = mclfree;
				mclfree = mcl;
				++mbstat.m_clfree;
			}
#ifdef COMMENT_OUT
/* *** Since m_split() increments "mclrefcnt[mtocl(m->m_ext.ext_buf)]",
   and AppleTalk ADSP uses m_split(), this incorrect sanity check
   caused a panic.
*** */
			else	/* sanity check - not referenced this way */
				panic("m_free m_ext cluster not free");
#endif
		} else {
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_size, m->m_ext.ext_arg);
		}
	}
	mbstat.m_mtypes[m->m_type]--;
	(void) _MCLUNREF(m);
	_MFREE_MUNGE(m);
	mbstat.m_mtypes[MT_FREE]++;
	m->m_type = MT_FREE;
	m->m_len = 0;
	m->m_next = mfree;
	mfree = m;
	i = m_want;
	m_want = 0;
	MBUF_UNLOCK();
	if (i) wakeup((caddr_t)&mfree);
	return (n);
}
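/*
 * Usage note (illustrative, not from this file): m_free() releases one
 * mbuf and returns its successor, so a whole chain is drained with the
 * classic loop that m_freem() wraps:
 *
 *	while (m != NULL)
 *		m = m_free(m);
 */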
/* m_mclget() adds an mbuf cluster to a normal mbuf */
struct mbuf *
m_mclget(
	struct mbuf *m,
	int nowait)
{
	MCLALLOC(m->m_ext.ext_buf, nowait);
	if (m->m_ext.ext_buf) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_size = MCLBYTES;
		m->m_ext.ext_free = 0;
		m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
		    &m->m_ext.ext_refs;
	}
	return (m);
}
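/*
 * Example (illustrative sketch): the usual caller-side pattern for
 * attaching a cluster.  Callers must check M_EXT afterward, because the
 * cluster allocation can fail independently of the mbuf allocation:
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		m = m_mclget(m, M_DONTWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_free(m);	// got an mbuf but no cluster
 *			m = NULL;
 *		}
 *	}
 */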
/* m_mclalloc() allocates an mbuf cluster */
caddr_t
m_mclalloc(
	int nowait)
{
	caddr_t p;

	(void)m_clalloc(1, nowait, MCLBYTES, 0);
	if ((p = (caddr_t)mclfree)) {
		++mclrefcnt[mtocl(p)];
		mbstat.m_clfree--;
		mclfree = ((union mcluster *)p)->mcl_next;
	} else {
		mbstat.m_drops++;
	}
	MBUF_UNLOCK();

	return (p);
}
/* m_mclfree() releases a reference to a cluster allocated by MCLALLOC,
 * freeing the cluster if the reference count has reached 0. */
void
m_mclfree(
	caddr_t p)
{
	MBUF_LOCK();

	m_range_check(p);

	if (--mclrefcnt[mtocl(p)] == 0) {
		((union mcluster *)(p))->mcl_next = mclfree;
		mclfree = (union mcluster *)(p);
		mbstat.m_clfree++;
	}
	MBUF_UNLOCK();
}
/* m_mclhasreference() checks whether the cluster of an mbuf is referenced by another mbuf */
int
m_mclhasreference(
	struct mbuf *m)
{
	return (m->m_ext.ext_refs.forward != &(m->m_ext.ext_refs));
}
__private_extern__ caddr_t
m_bigalloc(int nowait)
{
	caddr_t p;

	(void)m_clalloc(1, nowait, NBPG, 0);
	if ((p = (caddr_t)mbigfree)) {
		if (mclrefcnt[mtocl(p)] != mclrefcnt[mtocl(p) + 1])
			panic("m_bigalloc mclrefcnt %x mismatch %d != %d",
			    p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
		if (mclrefcnt[mtocl(p)] || mclrefcnt[mtocl(p) + 1])
			panic("m_bigalloc mclrefcnt %x not null %d != %d",
			    p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
		++mclrefcnt[mtocl(p)];
		++mclrefcnt[mtocl(p) + 1];
		mbstat.m_bigclfree--;
		mbigfree = ((union mbigcluster *)p)->mbc_next;
	} else {
		mbstat.m_drops++;
	}
	MBUF_UNLOCK();

	return (p);
}
__private_extern__ void
m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
{
	m_range_check(p);

	if (mclrefcnt[mtocl(p)] != mclrefcnt[mtocl(p) + 1])
		panic("m_bigfree mclrefcnt %x mismatch %d != %d",
		    p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
	--mclrefcnt[mtocl(p)];
	--mclrefcnt[mtocl(p) + 1];
	if (mclrefcnt[mtocl(p)] == 0) {
		((union mbigcluster *)(p))->mbc_next = mbigfree;
		mbigfree = (union mbigcluster *)(p);
		mbstat.m_bigclfree++;
	}
}
/* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
__private_extern__ struct mbuf *
m_mbigget(struct mbuf *m, int nowait)
{
	m->m_ext.ext_buf = m_bigalloc(nowait);
	if (m->m_ext.ext_buf) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_size = NBPG;
		m->m_ext.ext_free = m_bigfree;
		m->m_ext.ext_arg = 0;
		m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
		    &m->m_ext.ext_refs;
	}
	return (m);
}
void
m_copy_pkthdr(
	struct mbuf *to,
	struct mbuf *from)
{
	to->m_pkthdr = from->m_pkthdr;
	from->m_pkthdr.aux = (struct mbuf *)NULL;
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = (to)->m_pktdat;
}
791 * "Move" mbuf pkthdr from "from" to "to".
792 * "from" must have M_PKTHDR set, and "to" must be empty.
796 m_move_pkthdr(struct mbuf
*to
, struct mbuf
*from
)
798 KASSERT((to
->m_flags
& M_EXT
) == 0, ("m_move_pkthdr: to has cluster"));
800 to
->m_flags
= from
->m_flags
& M_COPYFLAGS
;
801 to
->m_data
= to
->m_pktdat
;
802 to
->m_pkthdr
= from
->m_pkthdr
; /* especially tags */
803 SLIST_INIT(&from
->m_pkthdr
.tags
); /* purge tags from src */
804 from
->m_flags
&= ~M_PKTHDR
;
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}
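/*
 * Usage note (illustrative): the three pkthdr helpers differ in how they
 * treat the source.  m_copy_pkthdr() hands the tag list to "to" and
 * resets it in "from"; m_move_pkthdr() additionally clears M_PKTHDR on
 * "from"; m_dup_pkthdr() leaves "from" intact and deep-copies the tags,
 * so it can fail and returns the m_tag_copy_chain() result:
 *
 *	if (m_dup_pkthdr(copy, orig, M_DONTWAIT) == 0) {
 *		// tag allocation failed; caller must dispose of "copy"
 *	}
 */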
/*
 * return a list of mbuf hdrs that point to clusters...
 * try for num_needed, if wantall is not set, return whatever
 * number were available... set up the first num_with_pkthdrs
 * with mbuf hdrs configured as packet headers... these are
 * chained on the m_nextpkt field... any packets requested beyond
 * this are chained onto the last packet header's m_next field.
 * The size of the cluster is controlled by the parameter bufsize.
 */
__private_extern__ struct mbuf *
m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, int how, int wantall, size_t bufsize)
{
	struct mbuf *m;
	struct mbuf **np, *top;
	unsigned int num, needed = *num_needed;

	if (bufsize != MCLBYTES && bufsize != NBPG)
		return 0;

	top = NULL;
	np = &top;

	(void)m_clalloc(needed, how, bufsize, 0);	/* takes the MBUF_LOCK, but doesn't release it... */

	for (num = 0; num < needed; num++) {
		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		if (mfree && ((bufsize == NBPG && mbigfree) || (bufsize == MCLBYTES && mclfree))) {
			/* mbuf + cluster are available */
			m = mfree;
			MCHECK(m);
			mfree = m->m_next;
			++mclrefcnt[mtocl(m)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[MT_DATA]++;
			if (bufsize == NBPG) {
				m->m_ext.ext_buf = (caddr_t)mbigfree;	/* get the big cluster */
				++mclrefcnt[mtocl(m->m_ext.ext_buf)];
				++mclrefcnt[mtocl(m->m_ext.ext_buf) + 1];
				mbstat.m_bigclfree--;
				mbigfree = ((union mbigcluster *)(m->m_ext.ext_buf))->mbc_next;
				m->m_ext.ext_free = m_bigfree;
				m->m_ext.ext_size = NBPG;
			} else {
				m->m_ext.ext_buf = (caddr_t)mclfree;	/* get the cluster */
				++mclrefcnt[mtocl(m->m_ext.ext_buf)];
				mbstat.m_clfree--;
				mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
				m->m_ext.ext_free = 0;
				m->m_ext.ext_size = MCLBYTES;
			}
			m->m_ext.ext_arg = 0;
			m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
			m->m_next = m->m_nextpkt = 0;
			m->m_type = MT_DATA;
			m->m_data = m->m_ext.ext_buf;
			m->m_len = 0;

			if (num_with_pkthdrs == 0)
				m->m_flags = M_EXT;
			else {
				m->m_flags = M_PKTHDR | M_EXT;
				_M_CLEAR_PKTHDR(m);
				num_with_pkthdrs--;
			}
		} else {
			MBUF_UNLOCK();

			if (num_with_pkthdrs == 0) {
				MGET(m, how, MT_DATA);
			} else {
				MGETHDR(m, how, MT_DATA);
				num_with_pkthdrs--;
			}
			if (m == 0)
				goto fail;

			if (bufsize == NBPG)
				m = m_mbigget(m, how);
			else
				m = m_mclget(m, how);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				goto fail;
			}
			MBUF_LOCK();
		}
		*np = m;

		if (num_with_pkthdrs)
			np = &m->m_nextpkt;
		else
			np = &m->m_next;
	}
	MBUF_UNLOCK();

	*num_needed = num;
	return (top);
fail:
	if (wantall && top) {
		m_freem(top);
		return 0;
	}
	*num_needed = num;

	return (top);
}
/*
 * Return a list of mbufs linked by m_nextpkt.
 * Try for num_needed, and if wantall is not set, return whatever
 * number were available.
 * The size of each mbuf in the list is controlled by the parameter packetlen.
 * Each mbuf of the list may have a chain of mbufs linked by m_next. Each mbuf
 * in the chain is called a segment.
 * If maxsegments is not null and the value pointed to is not null, this
 * specifies the maximum number of segments for a chain of mbufs.
 * If maxsegments is zero or the value pointed to is zero the
 * caller does not have any restriction on the number of segments.
 * The actual number of segments of an mbuf chain is returned in the value
 * pointed to by maxsegments.
 * When possible the allocation is done under a single lock.
 */
__private_extern__ struct mbuf *
m_allocpacket_internal(unsigned int *num_needed, size_t packetlen, unsigned int *maxsegments,
		       int how, int wantall, size_t wantsize)
{
	struct mbuf **np, *top;
	size_t bufsize;
	unsigned int num;
	unsigned int numchunks = 0;

	top = NULL;
	np = &top;

	if (wantsize == 0) {
		if (packetlen <= MINCLSIZE)
			bufsize = packetlen;
		else if (packetlen > MCLBYTES)
			bufsize = NBPG;
		else
			bufsize = MCLBYTES;
	} else if (wantsize == MCLBYTES || wantsize == NBPG)
		bufsize = wantsize;
	else
		return 0;

	if (bufsize <= MHLEN) {
		numchunks = 1;
	} else if (bufsize <= MINCLSIZE) {
		if (maxsegments != NULL && *maxsegments == 1) {
			bufsize = MCLBYTES;
			numchunks = 1;
		} else {
			numchunks = 2;
		}
	} else if (bufsize == NBPG) {
		numchunks = ((packetlen - 1) >> PGSHIFT) + 1;
	} else {
		numchunks = ((packetlen - 1) >> MCLSHIFT) + 1;
	}
	if (maxsegments != NULL) {
		if (*maxsegments && numchunks > *maxsegments) {
			*maxsegments = numchunks;
			return 0;
		}
		*maxsegments = numchunks;
	}
	/* m_clalloc takes the MBUF_LOCK, but does not release it */
	(void)m_clalloc(numchunks, how, (bufsize == NBPG) ? NBPG : MCLBYTES, 0);
	for (num = 0; num < *num_needed; num++) {
		struct mbuf **nm, *pkt = 0;
		size_t len;

		nm = &pkt;

		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		for (len = 0; len < packetlen; ) {
			struct mbuf *m = NULL;

			if (wantsize == 0 && packetlen > MINCLSIZE) {
				if (packetlen - len > MCLBYTES)
					bufsize = NBPG;
				else
					bufsize = MCLBYTES;
			}
			len += bufsize;

			if (mfree && ((bufsize == NBPG && mbigfree) || (bufsize == MCLBYTES && mclfree))) {
				/* mbuf + cluster are available */
				m = mfree;
				MCHECK(m);
				mfree = m->m_next;
				++mclrefcnt[mtocl(m)];
				mbstat.m_mtypes[MT_FREE]--;
				mbstat.m_mtypes[MT_DATA]++;
				if (bufsize == NBPG) {
					m->m_ext.ext_buf = (caddr_t)mbigfree;	/* get the big cluster */
					++mclrefcnt[mtocl(m->m_ext.ext_buf)];
					++mclrefcnt[mtocl(m->m_ext.ext_buf) + 1];
					mbstat.m_bigclfree--;
					mbigfree = ((union mbigcluster *)(m->m_ext.ext_buf))->mbc_next;
					m->m_ext.ext_free = m_bigfree;
					m->m_ext.ext_size = NBPG;
				} else {
					m->m_ext.ext_buf = (caddr_t)mclfree;	/* get the cluster */
					++mclrefcnt[mtocl(m->m_ext.ext_buf)];
					mbstat.m_clfree--;
					mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
					m->m_ext.ext_free = 0;
					m->m_ext.ext_size = MCLBYTES;
				}
				m->m_ext.ext_arg = 0;
				m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
				m->m_next = m->m_nextpkt = 0;
				m->m_type = MT_DATA;
				m->m_data = m->m_ext.ext_buf;
				m->m_len = 0;

				if (pkt == 0) {
					pkt = m;
					m->m_flags = M_PKTHDR | M_EXT;
					_M_CLEAR_PKTHDR(m);
				} else {
					m->m_flags = M_EXT;
				}
			} else {
				MBUF_UNLOCK();

				if (pkt == 0) {
					MGETHDR(m, how, MT_DATA);
				} else {
					MGET(m, how, MT_DATA);
				}
				if (m == 0)
					goto fail;
				if (bufsize <= MINCLSIZE) {
					if (bufsize > MHLEN) {
						MGET(m->m_next, how, MT_DATA);
						if (m->m_next == 0) {
							m_free(m);
							goto fail;
						}
					}
				} else {
					if (bufsize == NBPG)
						m = m_mbigget(m, how);
					else
						m = m_mclget(m, how);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						goto fail;
					}
				}
				MBUF_LOCK();
			}
			*nm = m;
			nm = &m->m_next;
		}
		*np = pkt;
		np = &pkt->m_nextpkt;
	}
	MBUF_UNLOCK();
	*num_needed = num;

	return top;
fail:
	if (wantall && top) {
		m_freem(top);
		return 0;
	}
	return top;
}
/* Best effort to get a mbuf cluster + pkthdr under one lock.
 * If we don't have them avail, just bail out and use the regular
 * path.
 * Used by drivers to allocate packets on receive ring.
 */
__private_extern__ struct mbuf *
m_getpacket_how(int how)
{
	unsigned int num_needed = 1;

	return m_getpackets_internal(&num_needed, 1, how, 1, MCLBYTES);
}
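/*
 * Example (illustrative sketch): a driver refilling its receive ring
 * would typically ask for a non-blocking packet and retry later on
 * failure:
 *
 *	struct mbuf *m = m_getpacket_how(M_DONTWAIT);
 *	if (m == NULL) {
 *		// out of mbufs/clusters; leave the ring slot empty for now
 *	}
 */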
/* Best effort to get a mbuf cluster + pkthdr under one lock.
 * If we don't have them avail, just bail out and use the regular
 * path.
 * Used by drivers to allocate packets on receive ring.
 */
struct mbuf *
m_getpacket(void)
{
	unsigned int num_needed = 1;

	return m_getpackets_internal(&num_needed, 1, M_WAITOK, 1, MCLBYTES);
}
/*
 * return a list of mbuf hdrs that point to clusters...
 * try for num_needed, if this can't be met, return whatever
 * number were available... set up the first num_with_pkthdrs
 * with mbuf hdrs configured as packet headers... these are
 * chained on the m_nextpkt field... any packets requested beyond
 * this are chained onto the last packet header's m_next field.
 */
struct mbuf *
m_getpackets(int num_needed, int num_with_pkthdrs, int how)
{
	unsigned int n = num_needed;

	return m_getpackets_internal(&n, num_with_pkthdrs, how, 0, MCLBYTES);
}
/*
 * return a list of mbuf hdrs set up as packet hdrs
 * chained together on the m_nextpkt field
 */
struct mbuf *
m_getpackethdrs(int num_needed, int how)
{
	struct mbuf *m;
	struct mbuf **np, *top;

	top = NULL;
	np = &top;

	MBUF_LOCK();

	while (num_needed--) {
		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		if ((m = mfree)) {	/* mbufs are available */
			MCHECK(m);
			mfree = m->m_next;
			++mclrefcnt[mtocl(m)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[MT_DATA]++;

			m->m_next = m->m_nextpkt = 0;
			m->m_type = MT_DATA;
			m->m_flags = M_PKTHDR;
			m->m_len = 0;
			m->m_data = m->m_pktdat;
			_M_CLEAR_PKTHDR(m);
		} else {
			MBUF_UNLOCK();
			m = m_retryhdr(how, MT_DATA);
			if (m == 0)
				return (top);
			MBUF_LOCK();
		}
		*np = m;
		np = &m->m_nextpkt;
	}
	MBUF_UNLOCK();

	return (top);
}
/* free an mbuf list (m_nextpkt) while following m_next under one lock.
 * returns the count of packets freed. Used by the drivers.
 */
int
m_freem_list(
	struct mbuf *m)
{
	struct mbuf *nextpkt;
	int i, count = 0;

	MBUF_LOCK();

	while (m) {
		if (m)
			nextpkt = m->m_nextpkt;	/* chain of linked mbufs from driver */
		else
			nextpkt = 0;

		count++;

		while (m) {	/* free the mbuf chain (like mfreem) */

			struct mbuf *n;

			m_range_check(m);
			m_range_check(mfree);
			m_range_check(mclfree);
			m_range_check(mbigfree);

			/* Free the aux data if there is any */
			if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) {
				/*
				 * Treat the current m as the nextpkt and set m
				 * to the aux data. Preserve nextpkt in m->m_nextpkt.
				 * This lets us free the aux data in this loop
				 * without having to call m_freem recursively,
				 * which wouldn't work because we've still got
				 * the lock.
				 */
				m->m_nextpkt = nextpkt;
				nextpkt = m;
				m = nextpkt->m_pkthdr.aux;
				nextpkt->m_pkthdr.aux = NULL;
			}

			if ((m->m_flags & M_PKTHDR) != 0 && !SLIST_EMPTY(&m->m_pkthdr.tags)) {
				/* A quick (albeit inefficient) expedient */
				MBUF_UNLOCK();
				m_tag_delete_chain(m, NULL);
				MBUF_LOCK();
			}

			n = m->m_next;

			if (n && n->m_nextpkt)
				panic("m_freem_list: m_nextpkt of m_next != NULL");
			if (m->m_type == MT_FREE)
				panic("freeing free mbuf");

			if (m->m_flags & M_EXT) {
				if (MCLHASREFERENCE(m)) {
					remque((queue_t)&m->m_ext.ext_refs);
				} else if (m->m_ext.ext_free == NULL) {
					union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;

					m_range_check(mcl);

					if (_MCLUNREF(mcl)) {
						mcl->mcl_next = mclfree;
						mclfree = mcl;
						++mbstat.m_clfree;
					}
				} else {
					(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
					    m->m_ext.ext_size, m->m_ext.ext_arg);
				}
			}
			mbstat.m_mtypes[m->m_type]--;
			(void) _MCLUNREF(m);
			_MFREE_MUNGE(m);
			mbstat.m_mtypes[MT_FREE]++;

			m->m_type = MT_FREE;
			m->m_flags = 0;
			m->m_len = 0;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
		m = nextpkt;	/* bump m with saved nextpkt if any */
	}
	if ((i = m_want))
		m_want = 0;

	MBUF_UNLOCK();

	if (i)
		wakeup((caddr_t)&mfree);

	return (count);
}
/*
 * Mbuffer utility routines.
 */

/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 */
int
m_leadingspace(
	struct mbuf *m)
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return (0);
		return (m->m_data - m->m_ext.ext_buf);
	}
	if (m->m_flags & M_PKTHDR)
		return (m->m_data - m->m_pktdat);
	return (m->m_data - m->m_dat);
}
/*
 * Compute the amount of space available
 * after the end of data in an mbuf.
 */
int
m_trailingspace(
	struct mbuf *m)
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return (0);
		return (m->m_ext.ext_buf + m->m_ext.ext_size -
		    (m->m_data + m->m_len));
	}
	return (&m->m_dat[MLEN] - (m->m_data + m->m_len));
}
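/*
 * Example (illustrative): a caller deciding whether new data fits in
 * place might append directly when there is room at the tail:
 *
 *	if (m_trailingspace(m) >= n) {
 *		bcopy(src, mtod(m, caddr_t) + m->m_len, n);
 *		m->m_len += n;
 *	}
 *
 * Both routines deliberately return 0 for a shared cluster
 * (MCLHASREFERENCE), since writing there would corrupt the other
 * reference.
 */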
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 * Does not adjust packet header length.
 */
struct mbuf *
m_prepend(
	struct mbuf *m,
	int len,
	int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
/*
 * Replacement for old M_PREPEND macro:
 * allocate new mbuf to prepend to chain,
 * copy junk along, and adjust length.
 */
struct mbuf *
m_prepend_2(
	struct mbuf *m,
	int len,
	int how)
{
	if (M_LEADINGSPACE(m) >= len) {
		m->m_data -= len;
		m->m_len += len;
	} else {
		m = m_prepend(m, len, how);
	}
	if ((m) && (m->m_flags & M_PKTHDR))
		m->m_pkthdr.len += len;
	return (m);
}
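/*
 * Example (illustrative): prepending a link-layer header.  m_prepend_2()
 * reuses leading space when possible and updates m_pkthdr.len itself;
 * on failure the chain has already been freed by m_prepend():
 *
 *	m = m_prepend_2(m, ETHER_HDR_LEN, M_DONTWAIT);
 *	if (m == NULL)
 *		return;
 *	eh = mtod(m, struct ether_header *);
 */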
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
struct mbuf *
m_copym(
	struct mbuf *m,
	int off0,
	int len,
	int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;

	while (off >= m->m_len) {
		if (m == 0)
			panic("m_copym");
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;

	MBUF_LOCK();

	while (len > 0) {
		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		if (mfree) {
			MCHECK(mfree);
			++mclrefcnt[mtocl(mfree)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[m->m_type]++;
			n = mfree;
			mfree = n->m_next;
			n->m_next = n->m_nextpkt = 0;
			n->m_type = m->m_type;
			n->m_data = n->m_dat;
			n->m_flags = 0;
		} else {
			MBUF_UNLOCK();
			n = m_retry(wait, m->m_type);
			MBUF_LOCK();
		}
		*np = n;

		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		if (len == M_COPYALL) {
			if (min(len, (m->m_len - off)) == len) {
				printf("m->m_len %d - off %d = %d, %d\n",
				    m->m_len, off, m->m_len - off,
				    min(len, (m->m_len - off)));
			}
		}
		n->m_len = min(len, (m->m_len - off));
		if (n->m_len == M_COPYALL) {
			printf("n->m_len == M_COPYALL, fixing\n");
			n->m_len = MHLEN;
		}
		if (m->m_flags & M_EXT) {
			n->m_ext = m->m_ext;
			insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
			n->m_data = m->m_data + off;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	MBUF_UNLOCK();

	return (top);
nospace:
	MBUF_UNLOCK();

	m_freem(top);
	return (0);
}
/*
 * equivalent to m_copym except that all necessary
 * mbuf hdrs are allocated within this routine
 * also, the last mbuf and offset accessed are passed
 * out and can be passed back in to avoid having to
 * rescan the entire mbuf list (normally hung off of the socket)
 */
struct mbuf *
m_copym_with_hdrs(
	struct mbuf *m,
	int off0,
	int len,
	int wait,
	struct mbuf **m_last,
	int *m_off)
{
	struct mbuf *n, **np = 0;
	int off = off0;
	struct mbuf *top = 0;
	int copyhdr = 0;
	int type;

	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;

	if (*m_last) {
		m = *m_last;
		off = *m_off;
	} else {
		while (off >= m->m_len) {
			off -= m->m_len;
			m = m->m_next;
		}
	}

	MBUF_LOCK();

	while (len > 0) {
		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		if (top == 0)
			type = MT_HEADER;
		else {
			if (m == 0)
				panic("m_gethdr_and_copym");
			type = m->m_type;
		}
		if (mfree) {
			MCHECK(mfree);
			++mclrefcnt[mtocl(mfree)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[type]++;
			n = mfree;
			mfree = n->m_next;
			n->m_next = n->m_nextpkt = 0;
			n->m_type = type;

			if (top) {
				n->m_data = n->m_dat;
				n->m_flags = 0;
			} else {
				n->m_data = n->m_pktdat;
				n->m_flags = M_PKTHDR;
				_M_CLEAR_PKTHDR(n);
			}
		} else {
			MBUF_UNLOCK();
			if (top)
				n = m_retry(wait, type);
			else
				n = m_retryhdr(wait, type);
			MBUF_LOCK();
		}
		if (n == 0)
			goto nospace;
		if (top == 0)
			top = n;
		else
			*np = n;

		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, (m->m_len - off));

		if (m->m_flags & M_EXT) {
			n->m_ext = m->m_ext;
			insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
			n->m_data = m->m_data + off;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;

		if (len == 0) {
			if ((off + n->m_len) == m->m_len) {
				*m_last = m->m_next;
				*m_off  = 0;
			} else {
				*m_last = m;
				*m_off  = off + n->m_len;
			}
			break;
		}
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	MBUF_UNLOCK();

	return (top);
nospace:
	MBUF_UNLOCK();

	m_freem(top);

	return (0);
}
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void m_copydata(
	struct mbuf *m,
	int off,
	int len,
	caddr_t cp)
{
	unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
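/*
 * Example (illustrative): copying a protocol header out of a possibly
 * fragmented chain into a local, aligned structure:
 *
 *	struct udphdr uh;
 *	m_copydata(m, iphlen, sizeof(uh), (caddr_t)&uh);
 *
 * Unlike m_pullup(), this never reshapes the chain; it panics if the
 * requested range runs past the end of the chain.
 */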
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void m_cat(
	struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
void
m_adj(
	struct mbuf *mp,
	int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			m = mp;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while ((m = m->m_next))
			m->m_len = 0;
	}
}
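/*
 * Example (illustrative): positive lengths trim from the head, negative
 * lengths trim from the tail, with m_pkthdr.len kept in sync:
 *
 *	m_adj(m, sizeof(struct ether_header));	// strip the link header
 *	m_adj(m, -ETHER_CRC_LEN);		// drop a trailing FCS
 */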
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(
	struct mbuf *n,
	int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
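/*
 * Example (illustrative): the classic caller idiom before casting with
 * mtod(); on failure the chain has already been freed:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */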
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(
	struct mbuf *m0,
	int len0,
	int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		MBUF_LOCK();
		n->m_ext = m->m_ext;
		insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
		MBUF_UNLOCK();
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
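/*
 * Example (illustrative): carving a fixed-size record off the front of
 * a chain; m0 keeps the first reclen bytes, tail gets the rest:
 *
 *	struct mbuf *tail = m_split(m0, reclen, M_DONTWAIT);
 *	if (tail == NULL) {
 *		// allocation failed; m0 is left (as far as possible)
 *		// in its original state
 *	}
 */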
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(
	char *buf,
	int totlen,
	int off0,
	struct ifnet *ifp,
	void (*copy)(const void *, void *, size_t))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else {
				/* give up when it's out of cluster mbufs */
				if (top)
					m_freem(top);
				m_freem(m);
				return (0);
			}
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
/*
 * Cluster freelist allocation check. The mbuf lock must be held.
 * Ensure hysteresis between hi/lo.
 */
static int
m_howmany(int num, size_t bufsize)
{
	int i = 0;

	/* Bail if we've maxed out the mbuf memory map */
	if (mbstat.m_clusters + (mbstat.m_bigclusters << 1) < nmbclusters) {
		int j = 0;

		if (bufsize == MCLBYTES) {
			/* Under minimum */
			if (mbstat.m_clusters < MINCL)
				return (MINCL - mbstat.m_clusters);
			/* Too few (free < 1/2 total) and not over maximum */
			if (mbstat.m_clusters < (nmbclusters >> 1)) {
				if (num >= mbstat.m_clfree)
					i = num - mbstat.m_clfree;
				if (((mbstat.m_clusters + num) >> 1) > mbstat.m_clfree)
					j = ((mbstat.m_clusters + num) >> 1) - mbstat.m_clfree;
				i = max(i, j);
				if (i + mbstat.m_clusters >= (nmbclusters >> 1))
					i = (nmbclusters >> 1) - mbstat.m_clusters;
			}
		} else {
			/* Under minimum */
			if (mbstat.m_bigclusters < MINCL)
				return (MINCL - mbstat.m_bigclusters);
			/* Too few (free < 1/2 total) and not over maximum */
			if (mbstat.m_bigclusters < (nmbclusters >> 2)) {
				if (num >= mbstat.m_bigclfree)
					i = num - mbstat.m_bigclfree;
				if (((mbstat.m_bigclusters + num) >> 1) > mbstat.m_bigclfree)
					j = ((mbstat.m_bigclusters + num) >> 1) - mbstat.m_bigclfree;
				i = max(i, j);
				if (i + mbstat.m_bigclusters >= (nmbclusters >> 2))
					i = (nmbclusters >> 2) - mbstat.m_bigclusters;
			}
		}
	}
	return i;
}
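/*
 * Worked example (illustrative, using the 2KB-cluster branch above):
 * with nmbclusters = 1024, m_clusters = 100, m_clfree = 10 and a request
 * for num = 50, the shortfall is i = 50 - 10 = 40 and the hysteresis
 * target is j = ((100 + 50) >> 1) - 10 = 65, so 65 clusters are grown;
 * the result is then clamped so the pool never exceeds nmbclusters >> 1.
 */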
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(
	struct mbuf *m0,
	int off,
	int len,
	caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
char *mcl_to_paddr(char *addr) {
	int base_phys;

	if (addr < (char *)mbutl || addr >= (char *)embutl)
		return (0);
	base_phys = mcl_paddr[(addr - (char *)mbutl) >> PGSHIFT];

	if (base_phys == 0)
		return (0);
	return ((char *)((int)base_phys | ((int)addr & PGOFSET)));
}
/*
 * Dup the mbuf chain passed in.  The whole thing.  No cute additional cruft.
 * And really copy the thing.  That way, we don't "precompute" checksums
 * for unsuspecting consumers.
 * Assumption: m->m_nextpkt == 0.
 * Trick: for small packets, don't dup into a cluster.  That way received
 * packets don't take up too much room in the sockbuf (cf. sbspace()).
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf *n, **np;
	struct mbuf *top;
	int copyhdr = 0;

	np = &top;
	top = 0;
	if (m->m_flags & M_PKTHDR)
		copyhdr = 1;

	/*
	 * Quick check: if we have one mbuf and its data fits in an
	 * mbuf with packet header, just copy and go.
	 */
	if (m->m_next == NULL) {
		/* Then just move the data into an mbuf and be done... */
		if (copyhdr) {
			if (m->m_pkthdr.len <= MHLEN) {
				if ((n = m_gethdr(how, m->m_type)) == NULL)
					return (NULL);
				n->m_len = m->m_len;
				m_dup_pkthdr(n, m, how);
				bcopy(m->m_data, n->m_data, m->m_len);
				return (n);
			}
		} else if (m->m_len <= MLEN) {
			if ((n = m_get(how, m->m_type)) == NULL)
				return (NULL);
			bcopy(m->m_data, n->m_data, m->m_len);
			n->m_len = m->m_len;
			return (n);
		}
	}
	while (m) {
#if BLUE_DEBUG
		kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
		    m->m_data);
#endif
		if (copyhdr)
			n = m_gethdr(how, m->m_type);
		else
			n = m_get(how, m->m_type);
		if (n == 0)
			goto nospace;
		if (m->m_flags & M_EXT) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0)
				goto nospace;
		}
		*np = n;
		if (copyhdr) {
			/* Don't use M_COPY_PKTHDR: preserve m_data */
			m_dup_pkthdr(n, m, how);
			copyhdr = 0;
			if ((n->m_flags & M_EXT) == 0)
				n->m_data = n->m_pktdat;
		}
		n->m_len = m->m_len;
		/*
		 * Get the dup on the same bdry as the original
		 * Assume that the two mbufs have the same offset to data area
		 * (up to word bdries)
		 */
		bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len);
		m = m->m_next;
		np = &n->m_next;
#if BLUE_DEBUG
		kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
		    n->m_data);
#endif
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}
int
m_mclref(struct mbuf *p)
{
	return (_MCLREF(p));
}

int
m_mclunref(struct mbuf *p)
{
	return (_MCLUNREF(p));
}

/* change mbuf to new type */
void
m_mchtype(struct mbuf *m, int t)
{
	MBUF_LOCK();
	mbstat.m_mtypes[(m)->m_type]--;
	mbstat.m_mtypes[t]++;
	(m)->m_type = t;
	MBUF_UNLOCK();
}

void *m_mtod(struct mbuf *m)
{
	return ((m)->m_data);
}

struct mbuf *m_dtom(void *x)
{
	return ((struct mbuf *)((u_long)(x) & ~(MSIZE-1)));
}

int m_mtocl(void *x)
{
	return (((char *)(x) - (char *)mbutl) / sizeof(union mcluster));
}

union mcluster *m_cltom(int x)
{
	return ((union mcluster *)(mbutl + (x)));
}

void m_mcheck(struct mbuf *m)
{
	if (m->m_type != MT_FREE)
		panic("mget MCHECK: m_type=%x m=%x", m->m_type, m);
}
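/*
 * Worked example (illustrative): m_dtom() relies on every mbuf being
 * MSIZE-aligned, so masking an interior pointer recovers the mbuf base.
 * With MSIZE = 256 (0x100), a pointer 0x5000012c into the data area
 * masks to the mbuf at 0x50000100.  This is also why it must never be
 * used on cluster (M_EXT) data, which does not live inside the mbuf.
 */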
static void
mbuf_expand_thread(void)
{
	while (1) {
		MBUF_LOCK();
		if (mbuf_expand_mcl) {
			int n;

			/* Adjust to the current number of clusters in use */
			n = mbuf_expand_mcl - (mbstat.m_clusters - mbstat.m_clfree);
			mbuf_expand_mcl = 0;

			if (n > 0)
				(void)m_clalloc(n, M_WAIT, MCLBYTES, 1);
		}
		if (mbuf_expand_big) {
			int n;

			/* Adjust to the current number of 4 KB clusters in use */
			n = mbuf_expand_big - (mbstat.m_bigclusters - mbstat.m_bigclfree);
			mbuf_expand_big = 0;

			if (n > 0)
				(void)m_clalloc(n, M_WAIT, NBPG, 1);
		}
		MBUF_UNLOCK();
		/*
		 * Because we can run out of memory before filling the mbuf map, we
		 * should not allocate more clusters than there are mbufs -- otherwise
		 * we could have a large number of useless clusters allocated.
		 */
		while (mbstat.m_mbufs < mbstat.m_bigclusters + mbstat.m_clusters) {
			if (m_expand(M_WAIT) == 0)
				break;
		}

		assert_wait(&mbuf_expand_thread_wakeup, THREAD_UNINT);
		(void) thread_block((thread_continue_t)mbuf_expand_thread);
	}
}

static void
mbuf_expand_thread_init(void)
{
	mbuf_expand_thread_initialized++;
	mbuf_expand_thread();
}
SYSCTL_DECL(_kern_ipc);
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");