/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
/* HISTORY
 *
 * 10/15/97 Annette DeSchon (deschon@apple.com)
 *	Fixed bug in which all cluster mbufs were broken up
 *	into regular mbufs: Some clusters are now reserved.
 *	When a cluster is needed, regular mbufs are no longer
 *	used.  (Radar 1683621)
 * 20-May-95 Mac Gillon (mgillon) at NeXT
 *	New version based on 4.4
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/domain.h>

#include <kern/queue.h>
#include <kern/kern_types.h>
#include <kern/sched_prim.h>

#include <IOKit/IOMapper.h>
extern vm_offset_t kmem_mb_alloc(vm_map_t, int);
extern boolean_t PE_parse_boot_arg(const char *, void *);
#define _MCLREF(p)	(++mclrefcnt[mtocl(p)])
#define _MCLUNREF(p)	(--mclrefcnt[mtocl(p)] == 0)
#define _M_CLEAR_PKTHDR(mbuf_ptr)	(mbuf_ptr)->m_pkthdr.rcvif = NULL; \
					(mbuf_ptr)->m_pkthdr.len = 0; \
					(mbuf_ptr)->m_pkthdr.header = NULL; \
					(mbuf_ptr)->m_pkthdr.csum_flags = 0; \
					(mbuf_ptr)->m_pkthdr.csum_data = 0; \
					(mbuf_ptr)->m_pkthdr.aux = (struct mbuf *)NULL; \
					(mbuf_ptr)->m_pkthdr.vlan_tag = 0; \
					(mbuf_ptr)->m_pkthdr.socket_id = 0; \
					SLIST_INIT(&(mbuf_ptr)->m_pkthdr.tags);
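/*
 * Illustrative sketch (not from the original source): a caller that
 * hand-builds a packet-header mbuf resets all header fields with
 * _M_CLEAR_PKTHDR before filling in the ones it knows.  "ifp" below is
 * a hypothetical interface pointer.
 */
#if 0
static void
example_clear_pkthdr(struct mbuf *m, struct ifnet *ifp)
{
    _M_CLEAR_PKTHDR(m);         /* rcvif, len, csum fields, tags... reset */
    m->m_pkthdr.rcvif = ifp;    /* then fill in what the caller knows */
    m->m_pkthdr.len = m->m_len;
}
#endif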
/* kernel translator */
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

lck_mtx_t		*mbuf_mlock;
lck_grp_t		*mbuf_mlock_grp;
lck_grp_attr_t		*mbuf_mlock_grp_attr;
lck_attr_t		*mbuf_mlock_attr;
extern lck_mtx_t	*domain_proto_mtx;
struct mbuf	*mfree;		/* mbuf free list */
struct mbuf	*mfreelater;	/* mbuf deallocation list */
extern vm_map_t	mb_map;		/* special map */
int		m_want;		/* sleepers on mbufs */
short		*mclrefcnt;	/* mapped cluster reference counts */
int		*mcl_paddr;	/* cluster physical addresses (filled in by mbinit) */
static ppnum_t	mcl_paddr_base;	/* Handle returned by IOMapper::iovmAlloc() */
union mcluster	*mclfree;	/* mapped cluster free list */
union mbigcluster *mbigfree;	/* mapped big cluster free list */
int		max_linkhdr;	/* largest link-level header */
int		max_protohdr;	/* largest protocol header */
int		max_hdr;	/* largest link+protocol header */
int		max_datalen;	/* MHLEN - max_hdr */
struct mbstat	mbstat;		/* statistics */
union mcluster	*mbutl;		/* first mapped cluster address */
union mcluster	*embutl;	/* ending virtual address of mclusters */

static int	nclpp;		/* # clusters per physical page */
static int m_howmany(int, size_t);
void m_reclaim(void);
static int m_clalloc(const int, const int, const size_t, int);

#define MF_NOWAIT	0x1

/* The number of cluster mbufs that are allocated, to start. */
#define MINCL	max(16, 2)
static int mbuf_expand_thread_wakeup = 0;
static int mbuf_expand_mcl = 0;
static int mbuf_expand_big = 0;
static int mbuf_expand_thread_initialized = 0;

static void mbuf_expand_thread_init(void);
static void mbuf_expand_thread(void);
static int m_expand(int);
static caddr_t m_bigalloc(int);
static void m_bigfree(caddr_t, u_int, caddr_t);
static struct mbuf *m_mbigget(struct mbuf *, int);
static void m_range_check(void *addr);
#if 1
static int mfree_munge = 0;
#if 0
#define _MFREE_MUNGE(m) {					\
    if (mfree_munge)						\
        {   int i;						\
            vm_offset_t *element = (vm_offset_t *)(m);		\
            for (i = 0;						\
                 i < sizeof(struct mbuf)/sizeof(vm_offset_t);	\
                 i++)						\
                    (element)[i] = 0xdeadbeef;			\
        }							\
}
#else
void
munge_mbuf(struct mbuf *m)
{
    int i;
    vm_offset_t *element = (vm_offset_t *)(m);

    for (i = 0;
         i < sizeof(struct mbuf)/sizeof(vm_offset_t);
         i++)
            (element)[i] = 0xdeadbeef;
}

#define _MFREE_MUNGE(m) {	\
    if (mfree_munge)		\
        munge_mbuf(m);		\
}
#endif
#else
#define _MFREE_MUNGE(m)
#endif
#define _MINTGET(m, type) { \
    MBUF_LOCK(); \
    if (((m) = mfree) != 0) { \
        MCHECK(m); \
        ++mclrefcnt[mtocl(m)]; \
        mbstat.m_mtypes[MT_FREE]--; \
        mbstat.m_mtypes[(type)]++; \
        mfree = (m)->m_next; \
    } \
    MBUF_UNLOCK(); \
}
static void
m_range_check(void *addr)
{
    if (addr && (addr < (void *)mbutl || addr >= (void *)embutl))
        panic("mbuf address out of range 0x%x", addr);
}
__private_extern__ void
mbinit(void)
{
    int m;
    int initmcl = 32;
    int mcl_pages;

    if (nclpp)
        return;
    nclpp = round_page_32(MCLBYTES) / MCLBYTES;	/* see mbufgc() */
    if (nclpp < 1) nclpp = 1;
    mbuf_mlock_grp_attr = lck_grp_attr_alloc_init();
    lck_grp_attr_setdefault(mbuf_mlock_grp_attr);

    mbuf_mlock_grp = lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr);
    mbuf_mlock_attr = lck_attr_alloc_init();
    lck_attr_setdefault(mbuf_mlock_attr);

    mbuf_mlock = lck_mtx_alloc_init(mbuf_mlock_grp, mbuf_mlock_attr);

    mbstat.m_msize = MSIZE;
    mbstat.m_mclbytes = MCLBYTES;
    mbstat.m_minclsize = MINCLSIZE;
    mbstat.m_mlen = MLEN;
    mbstat.m_mhlen = MHLEN;
    mbstat.m_bigmclbytes = NBPG;

    if (nmbclusters == 0)
        nmbclusters = NMBCLUSTERS;
    MALLOC(mclrefcnt, short *, nmbclusters * sizeof (short),
        M_TEMP, M_WAITOK);
    if (mclrefcnt == 0)
        panic("mbinit");
    for (m = 0; m < nmbclusters; m++)
        mclrefcnt[m] = -1;

    /* Calculate the number of pages assigned to the cluster pool */
    mcl_pages = nmbclusters/(NBPG/CLBYTES);
    MALLOC(mcl_paddr, int *, mcl_pages * sizeof(int), M_TEMP, M_WAITOK);
    if (mcl_paddr == 0)
        panic("mbinit1");
    /* Register with the I/O Bus mapper */
    mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
    bzero((char *)mcl_paddr, mcl_pages * sizeof(int));

    embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES));

    PE_parse_boot_arg("initmcl", &initmcl);

    if (m_clalloc(max(NBPG/CLBYTES, 1) * initmcl, M_WAIT, MCLBYTES, 0) == 0)
        goto bad;
    MBUF_UNLOCK();

    (void) kernel_thread(kernel_task, mbuf_expand_thread_init);

    return;
bad:
    panic("mbinit");
}
/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Take the mbuf lock (if not already locked) and do not release it.
 */
static int
m_clalloc(
    const int num,
    const int nowait,
    const size_t bufsize,
    int locked)
{
    int i;
    vm_size_t size = 0;
    int numpages = 0;
    vm_offset_t page = 0;

    if (locked == 0)
        MBUF_LOCK();
    /*
     * Honor the caller's wish to block or not block.
     * We have a way to grow the pool asynchronously,
     * by kicking the dlil_input_thread.
     */
    i = m_howmany(num, bufsize);
    if (i == 0 || nowait == M_DONTWAIT)
        goto out;

    MBUF_UNLOCK();
    size = round_page_32(i * bufsize);
    page = kmem_mb_alloc(mb_map, size);

    if (page == 0) {
        size = NBPG;		/* Try for 1 if failed */
        page = kmem_mb_alloc(mb_map, size);
    }
    MBUF_LOCK();

    if (page) {
        numpages = size / NBPG;
        for (i = 0; i < numpages; i++, page += NBPG) {
            if (((int)page & PGOFSET) == 0) {
                ppnum_t offset = ((char *)page - (char *)mbutl)/NBPG;
                ppnum_t new_page = pmap_find_phys(kernel_pmap, (vm_address_t) page);

                /*
                 * In the case of no mapper being available
                 * the following code nops and returns the
                 * input page, if there is a mapper the I/O
                 * page appropriate is returned.
                 */
                new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
                mcl_paddr[offset] = new_page << 12;
            }
            if (bufsize == MCLBYTES) {
                union mcluster *mcl = (union mcluster *)page;

                if (++mclrefcnt[mtocl(mcl)] != 0)
                    panic("m_clalloc already there");
                mcl->mcl_next = mclfree;
                mclfree = mcl++;
                if (++mclrefcnt[mtocl(mcl)] != 0)
                    panic("m_clalloc already there");
                mcl->mcl_next = mclfree;
                mclfree = mcl++;
            } else {
                union mbigcluster *mbc = (union mbigcluster *)page;

                if (++mclrefcnt[mtocl(mbc)] != 0)
                    panic("m_clalloc already there");
                if (++mclrefcnt[mtocl(mbc) + 1] != 0)
                    panic("m_clalloc already there");

                mbc->mbc_next = mbigfree;
                mbigfree = mbc;
            }
        }
        if (bufsize == MCLBYTES) {
            int numcl = numpages << 1;
            mbstat.m_clfree += numcl;
            mbstat.m_clusters += numcl;
            return (numcl);
        } else {
            mbstat.m_bigclfree += numpages;
            mbstat.m_bigclusters += numpages;
            return (numpages);
        }
    }
out:
    /*
     * When non-blocking we kick a thread if we have to grow the
     * pool or if the number of free clusters is less than requested.
     */
    if (bufsize == MCLBYTES) {
        if (i > 0) {
            /* Remember total number of clusters needed at this time */
            i += mbstat.m_clusters;
            if (i > mbuf_expand_mcl) {
                mbuf_expand_mcl = i;
                if (mbuf_expand_thread_initialized)
                    wakeup((caddr_t)&mbuf_expand_thread_wakeup);
            }
        }
        if (mbstat.m_clfree >= num)
            return 1;
    } else {
        if (i > 0) {
            /* Remember total number of 4KB clusters needed at this time */
            i += mbstat.m_bigclusters;
            if (i > mbuf_expand_big) {
                mbuf_expand_big = i;
                if (mbuf_expand_thread_initialized)
                    wakeup((caddr_t)&mbuf_expand_thread_wakeup);
            }
        }
        if (mbstat.m_bigclfree >= num)
            return 1;
    }
    return 0;
}
/*
 * Add more free mbufs by cutting up a cluster.
 */
static int
m_expand(int canwait)
{
    caddr_t mcl;

    if (mbstat.m_clfree < (mbstat.m_clusters >> 4)) {
        /*
         * 1/16th of the total number of cluster mbufs allocated is
         * reserved for large packets.  The number reserved must
         * always be < 1/2, or future allocation will be prevented.
         */
        (void)m_clalloc(1, canwait, MCLBYTES, 0);
        MBUF_UNLOCK();
        if (mbstat.m_clfree < (mbstat.m_clusters >> 4))
            return 0;
    }

    MCLALLOC(mcl, canwait);
    if (mcl) {
        struct mbuf *m = (struct mbuf *)mcl;
        int i = MCLBYTES / MSIZE;	/* mbufs carved from one cluster */
        MBUF_LOCK();
        mbstat.m_mtypes[MT_FREE] += i;
        mbstat.m_mbufs += i;
        while (i--) {
            _MFREE_MUNGE(m);
            m->m_type = MT_FREE;
            m->m_next = mfree;
            mfree = m++;
        }
        i = m_want;
        m_want = 0;
        MBUF_UNLOCK();
        if (i) wakeup((caddr_t)&mfree);
        return 1;
    }
    return 0;
}
/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(int canwait, int type)
{
    struct mbuf *m = 0;
    int wait;

    for (;;) {
        (void) m_expand(canwait);
        _MINTGET(m, type);
        if (m) {
            (m)->m_next = (m)->m_nextpkt = 0;
            (m)->m_type = (type);
            (m)->m_data = (m)->m_dat;
            (m)->m_len = 0;
        }
        if (m || canwait == M_DONTWAIT)
            break;
        MBUF_LOCK();
        wait = m_want++;
        if (wait == 0)
            mbstat.m_drain++;
        else
            mbstat.m_wait++;
        MBUF_UNLOCK();

        if (mbuf_expand_thread_initialized)
            wakeup((caddr_t)&mbuf_expand_thread_wakeup);

        if (wait == 0) {
            m_reclaim();
        } else {
            struct timespec ts;
            ts.tv_sec = 1;
            ts.tv_nsec = 0;
            (void) msleep((caddr_t)&mfree, 0, (PZERO-1) | PDROP, "m_retry", &ts);
        }
    }
    if (m == 0)
        mbstat.m_drops++;
    return (m);
}
/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(int canwait, int type)
{
    struct mbuf *m;

    if ((m = m_retry(canwait, type))) {
        m->m_next = m->m_nextpkt = 0;
        m->m_flags |= M_PKTHDR;
        m->m_data = m->m_pktdat;
        _M_CLEAR_PKTHDR(m);
    }
    return (m);
}
void
m_reclaim(void)
{
    do_reclaim = 1;	/* drain is performed in pfslowtimo(), to avoid deadlocks */
    mbstat.m_drain++;
}
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int nowait, int type)
{
    struct mbuf *m;

    m_range_check(mfree);
    m_range_check(mclfree);
    m_range_check(mbigfree);

    _MINTGET(m, type);
    if (m) {
        m->m_next = m->m_nextpkt = 0;
        m->m_type = type;
        m->m_data = m->m_dat;
        m->m_len = 0;
    } else
        (m) = m_retry(nowait, type);

    m_range_check(mfree);
    m_range_check(mclfree);
    m_range_check(mbigfree);

    return (m);
}

struct mbuf *
m_gethdr(int nowait, int type)
{
    struct mbuf *m;

    m_range_check(mfree);
    m_range_check(mclfree);
    m_range_check(mbigfree);

    _MINTGET(m, type);
    if (m) {
        m->m_next = m->m_nextpkt = 0;
        m->m_type = type;
        m->m_data = m->m_pktdat;
        m->m_flags = M_PKTHDR;
        m->m_len = 0;
        _M_CLEAR_PKTHDR(m)
    } else
        m = m_retryhdr(nowait, type);

    m_range_check(mfree);
    m_range_check(mclfree);
    m_range_check(mbigfree);

    return (m);
}

struct mbuf *
m_getclr(int nowait, int type)
{
    struct mbuf *m;

    MGET(m, nowait, type);
    if (m == 0)
        return (0);
    bzero(mtod(m, caddr_t), MLEN);
    m->m_len = 0;
    return (m);
}
struct mbuf *
m_free(struct mbuf *m)
{
    struct mbuf *n = m->m_next;
    int i;

    m_range_check(mfree);
    m_range_check(mclfree);

    if (m->m_type == MT_FREE)
        panic("freeing free mbuf");

    /* Free the aux data if there is any */
    if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) {
        m_freem(m->m_pkthdr.aux);
    }
    if ((m->m_flags & M_PKTHDR) != 0)
        m_tag_delete_chain(m, NULL);

    MBUF_LOCK();
    if ((m->m_flags & M_EXT)) {
        if (MCLHASREFERENCE(m)) {
            remque((queue_t)&m->m_ext.ext_refs);
        } else if (m->m_ext.ext_free == NULL) {
            union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;

            m_range_check(mcl);

            if (_MCLUNREF(mcl)) {
                mcl->mcl_next = mclfree;
                mclfree = mcl;
                ++mbstat.m_clfree;
            }
            /* *** Since m_split() increments "mclrefcnt[mtocl(m->m_ext.ext_buf)]",
               and AppleTalk ADSP uses m_split(), this incorrect sanity check
               is disabled:
                else	// sanity check - not referenced this way
                    panic("m_free m_ext cluster not free");
            *** */
        } else {
            (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
                m->m_ext.ext_size, m->m_ext.ext_arg);
        }
    }
    mbstat.m_mtypes[m->m_type]--;
    (void) _MCLUNREF(m);
    _MFREE_MUNGE(m);
    m->m_type = MT_FREE;
    mbstat.m_mtypes[m->m_type]++;
    m->m_flags = 0;
    m->m_next = mfree;
    m->m_len = 0;
    mfree = m;
    i = m_want;
    m_want = 0;
    MBUF_UNLOCK();
    if (i) wakeup((caddr_t)&mfree);
    return (n);
}
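/*
 * Illustrative sketch (not from the original source): m_free() releases a
 * single mbuf and returns its m_next, so freeing a whole chain is just a
 * loop -- which is exactly what m_freem() below does.
 */
#if 0
static void
example_free_chain(struct mbuf *m)
{
    while (m)
        m = m_free(m);	/* each call hands back the next mbuf */
}
#endif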
/* m_mclget() adds an mbuf cluster to a normal mbuf */
struct mbuf *
m_mclget(struct mbuf *m, int nowait)
{
    MCLALLOC(m->m_ext.ext_buf, nowait);
    if (m->m_ext.ext_buf) {
        m->m_data = m->m_ext.ext_buf;
        m->m_flags |= M_EXT;
        m->m_ext.ext_size = MCLBYTES;
        m->m_ext.ext_free = 0;
        m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
            &m->m_ext.ext_refs;
    }

    return (m);
}
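/*
 * Illustrative sketch (not from the original source): the usual pattern for
 * attaching a cluster.  m_mclget() leaves M_EXT clear when no cluster could
 * be had, so the caller must check the flag afterwards.
 */
#if 0
static struct mbuf *
example_get_cluster_mbuf(void)
{
    struct mbuf *m;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL)
        return (NULL);
    m = m_mclget(m, M_DONTWAIT);
    if ((m->m_flags & M_EXT) == 0) {	/* no cluster attached */
        m_free(m);
        return (NULL);
    }
    return (m);		/* MCLBYTES of contiguous space at m->m_data */
}
#endif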
/* m_mclalloc() allocates an mbuf cluster */
caddr_t
m_mclalloc(int nowait)
{
    caddr_t p;

    (void)m_clalloc(1, nowait, MCLBYTES, 0);
    if ((p = (caddr_t)mclfree)) {
        ++mclrefcnt[mtocl(p)];
        mbstat.m_clfree--;
        mclfree = ((union mcluster *)p)->mcl_next;
    } else {
        mbstat.m_drops++;
    }
    MBUF_UNLOCK();

    return (p);
}
/* m_mclfree() releases a reference to a cluster allocated by MCLALLOC,
 * freeing the cluster if the reference count has reached 0. */
void
m_mclfree(caddr_t p)
{
    MBUF_LOCK();

    m_range_check(p);

    if (--mclrefcnt[mtocl(p)] == 0) {
        ((union mcluster *)(p))->mcl_next = mclfree;
        mclfree = (union mcluster *)(p);
        mbstat.m_clfree++;
    }
    MBUF_UNLOCK();
}
/* m_mclhasreference() checks if a cluster of an mbuf is referenced by another mbuf */
int
m_mclhasreference(struct mbuf *m)
{
    return (m->m_ext.ext_refs.forward != &(m->m_ext.ext_refs));
}
__private_extern__ caddr_t
m_bigalloc(int nowait)
{
    caddr_t p;

    (void)m_clalloc(1, nowait, NBPG, 0);
    if ((p = (caddr_t)mbigfree)) {
        if (mclrefcnt[mtocl(p)] != mclrefcnt[mtocl(p) + 1])
            panic("m_bigalloc mclrefcnt %x mismatch %d != %d",
                p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
        if (mclrefcnt[mtocl(p)] || mclrefcnt[mtocl(p) + 1])
            panic("m_bigalloc mclrefcnt %x not null %d != %d",
                p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
        ++mclrefcnt[mtocl(p)];
        ++mclrefcnt[mtocl(p) + 1];
        mbstat.m_bigclfree--;
        mbigfree = ((union mbigcluster *)p)->mbc_next;
    } else {
        mbstat.m_drops++;
    }
    MBUF_UNLOCK();

    return (p);
}
__private_extern__ void
m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
{
    m_range_check(p);

    if (mclrefcnt[mtocl(p)] != mclrefcnt[mtocl(p) + 1])
        panic("m_bigfree mclrefcnt %x mismatch %d != %d",
            p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
    --mclrefcnt[mtocl(p)];
    --mclrefcnt[mtocl(p) + 1];
    if (mclrefcnt[mtocl(p)] == 0) {
        ((union mbigcluster *)(p))->mbc_next = mbigfree;
        mbigfree = (union mbigcluster *)(p);
        mbstat.m_bigclfree++;
    }
}
/* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
__private_extern__ struct mbuf *
m_mbigget(struct mbuf *m, int nowait)
{
    m->m_ext.ext_buf = m_bigalloc(nowait);
    if (m->m_ext.ext_buf) {
        m->m_data = m->m_ext.ext_buf;
        m->m_flags |= M_EXT;
        m->m_ext.ext_size = NBPG;
        m->m_ext.ext_free = m_bigfree;
        m->m_ext.ext_arg = 0;
        m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
            &m->m_ext.ext_refs;
    }

    return (m);
}
/*
 * Copy mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
{
    to->m_pkthdr = from->m_pkthdr;
    from->m_pkthdr.aux = (struct mbuf *)NULL;
    SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
    to->m_flags = from->m_flags & M_COPYFLAGS;
    to->m_data = (to)->m_pktdat;
}
793 * "Move" mbuf pkthdr from "from" to "to".
794 * "from" must have M_PKTHDR set, and "to" must be empty.
798 m_move_pkthdr(struct mbuf
*to
, struct mbuf
*from
)
800 KASSERT((to
->m_flags
& M_EXT
) == 0, ("m_move_pkthdr: to has cluster"));
802 to
->m_flags
= from
->m_flags
& M_COPYFLAGS
;
803 to
->m_data
= to
->m_pktdat
;
804 to
->m_pkthdr
= from
->m_pkthdr
; /* especially tags */
805 SLIST_INIT(&from
->m_pkthdr
.tags
); /* purge tags from src */
806 from
->m_flags
&= ~M_PKTHDR
;
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{
    to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
    if ((to->m_flags & M_EXT) == 0)
        to->m_data = to->m_pktdat;
    to->m_pkthdr = from->m_pkthdr;
    SLIST_INIT(&to->m_pkthdr.tags);
    return (m_tag_copy_chain(to, from, how));
}
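/*
 * Illustrative sketch (not from the original source): unlike
 * m_copy_pkthdr(), which empties the source, m_dup_pkthdr() leaves "from"
 * intact and deep-copies the tag chain, so it can fail under M_DONTWAIT.
 */
#if 0
static int
example_dup_header(struct mbuf *to, struct mbuf *from)
{
    /* assuming m_tag_copy_chain()'s 0-on-failure return convention */
    if (m_dup_pkthdr(to, from, M_DONTWAIT) == 0)
        return (0);	/* tag copy failed; "from" is left intact */
    return (1);
}
#endif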
/*
 * return a list of mbuf hdrs that point to clusters...
 * try for num_needed, if wantall is not set, return whatever
 * number were available... set up the first num_with_pkthdrs
 * with mbuf hdrs configured as packet headers... these are
 * chained on the m_nextpkt field... any packets requested beyond
 * this are chained onto the last packet header's m_next field.
 * The size of the cluster is controlled by the parameter bufsize.
 */
__private_extern__ struct mbuf *
m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, int how, int wantall, size_t bufsize)
{
    struct mbuf *m;
    struct mbuf **np, *top;
    unsigned int num, needed = *num_needed;

    if (bufsize != MCLBYTES && bufsize != NBPG)
        return 0;

    top = NULL;
    np = &top;

    (void)m_clalloc(needed, how, bufsize, 0);	/* takes the MBUF_LOCK, but doesn't release it... */

    for (num = 0; num < needed; num++) {
        m_range_check(mfree);
        m_range_check(mclfree);
        m_range_check(mbigfree);

        if (mfree && ((bufsize == NBPG && mbigfree) || (bufsize == MCLBYTES && mclfree))) {
            /* mbuf + cluster are available */
            m = mfree;
            MCHECK(m);
            mfree = m->m_next;
            ++mclrefcnt[mtocl(m)];
            mbstat.m_mtypes[MT_FREE]--;
            mbstat.m_mtypes[MT_DATA]++;
            if (bufsize == NBPG) {
                m->m_ext.ext_buf = (caddr_t)mbigfree;	/* get the big cluster */
                ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
                ++mclrefcnt[mtocl(m->m_ext.ext_buf) + 1];
                mbstat.m_bigclfree--;
                mbigfree = ((union mbigcluster *)(m->m_ext.ext_buf))->mbc_next;
                m->m_ext.ext_free = m_bigfree;
                m->m_ext.ext_size = NBPG;
            } else {
                m->m_ext.ext_buf = (caddr_t)mclfree;	/* get the cluster */
                ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
                mbstat.m_clfree--;
                mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
                m->m_ext.ext_free = 0;
                m->m_ext.ext_size = MCLBYTES;
            }
            m->m_ext.ext_arg = 0;
            m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
            m->m_next = m->m_nextpkt = 0;
            m->m_type = MT_DATA;
            m->m_data = m->m_ext.ext_buf;
            m->m_len = 0;

            if (num_with_pkthdrs == 0)
                m->m_flags = M_EXT;
            else {
                m->m_flags = M_PKTHDR | M_EXT;
                _M_CLEAR_PKTHDR(m);

                num_with_pkthdrs--;
            }
        } else {
            MBUF_UNLOCK();

            if (num_with_pkthdrs == 0) {
                MGET(m, how, MT_DATA);
            } else {
                MGETHDR(m, how, MT_DATA);

                num_with_pkthdrs--;
            }
            if (m == 0)
                goto fail;

            if (bufsize == NBPG)
                m = m_mbigget(m, how);
            else
                m = m_mclget(m, how);
            if ((m->m_flags & M_EXT) == 0) {
                m_free(m);
                goto fail;
            }
            MBUF_LOCK();
        }
        *np = m;

        if (num_with_pkthdrs)
            np = &m->m_nextpkt;
        else
            np = &m->m_next;
    }
    MBUF_UNLOCK();

    *num_needed = num;
    return (top);
fail:
    if (wantall && top) {
        m_freem_list(top);
        return 0;
    }
    *num_needed = num;

    return (top);
}
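/*
 * Illustrative sketch (not from the original source): how a driver might
 * fill a receive ring with cluster-backed packet headers in one shot.
 * RX_RING_SIZE is a hypothetical constant; exported wrappers such as
 * m_getpackets() normally front this internal routine.
 */
#if 0
#define RX_RING_SIZE 64		/* hypothetical ring depth */

static struct mbuf *
example_fill_rx_ring(void)
{
    unsigned int needed = RX_RING_SIZE;
    struct mbuf *list;

    /* all with pkthdrs, chained on m_nextpkt; wantall == 0 accepts fewer */
    list = m_getpackets_internal(&needed, RX_RING_SIZE, M_DONTWAIT, 0, MCLBYTES);
    /* "needed" now holds the number actually allocated */
    return (list);
}
#endif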
/*
 * Return list of mbuf linked by m_nextpkt.
 * Try for num_needed, and if wantall is not set, return whatever
 * number were available.
 * The size of each mbuf in the list is controlled by the parameter packetlen.
 * Each mbuf of the list may have a chain of mbufs linked by m_next. Each mbuf in
 * the chain is called a segment.
 * If maxsegments is not null and the value pointed to is not null, this specifies
 * the maximum number of segments for a chain of mbufs.
 * If maxsegments is zero or the value pointed to is zero the
 * caller does not have any restriction on the number of segments.
 * The actual number of segments of a mbuf chain is returned in the value pointed
 * to by maxsegments.
 * When possible the allocation is done under a single lock.
 */
__private_extern__ struct mbuf *
m_allocpacket_internal(unsigned int *num_needed, size_t packetlen, unsigned int *maxsegments,
    int how, int wantall, size_t wantsize)
{
    struct mbuf **np, *top;
    size_t bufsize;
    unsigned int num;
    unsigned int numchunks = 0;

    top = NULL;
    np = &top;

    if (wantsize == 0) {
        if (packetlen <= MINCLSIZE)
            bufsize = packetlen;
        else if (packetlen > MCLBYTES)
            bufsize = NBPG;
        else
            bufsize = MCLBYTES;
    } else if (wantsize == MCLBYTES || wantsize == NBPG)
        bufsize = wantsize;
    else
        return 0;

    if (bufsize <= MHLEN) {
        numchunks = 1;
    } else if (bufsize <= MINCLSIZE) {
        if (maxsegments != NULL && *maxsegments == 1) {
            bufsize = MCLBYTES;
            numchunks = 1;
        } else {
            numchunks = 2;
        }
    } else if (bufsize == NBPG) {
        numchunks = ((packetlen - 1) >> PGSHIFT) + 1;
    } else {
        numchunks = ((packetlen - 1) >> MCLSHIFT) + 1;
    }
    if (maxsegments != NULL) {
        if (*maxsegments && numchunks > *maxsegments) {
            *maxsegments = numchunks;
            return 0;
        }
        *maxsegments = numchunks;
    }
    /* m_clalloc takes the MBUF_LOCK, but do not release it */
    (void)m_clalloc(numchunks, how, (bufsize == NBPG) ? NBPG : MCLBYTES, 0);
    for (num = 0; num < *num_needed; num++) {
        struct mbuf **nm, *pkt = 0;
        size_t len;

        nm = &pkt;

        m_range_check(mfree);
        m_range_check(mclfree);
        m_range_check(mbigfree);

        for (len = 0; len < packetlen; ) {
            struct mbuf *m = NULL;

            if (wantsize == 0 && packetlen > MINCLSIZE) {
                if (packetlen - len > MCLBYTES)
                    bufsize = NBPG;
                else
                    bufsize = MCLBYTES;
            }
            len += bufsize;

            if (mfree && ((bufsize == NBPG && mbigfree) || (bufsize == MCLBYTES && mclfree))) {
                /* mbuf + cluster are available */
                m = mfree;
                MCHECK(m);
                mfree = m->m_next;
                ++mclrefcnt[mtocl(m)];
                mbstat.m_mtypes[MT_FREE]--;
                mbstat.m_mtypes[MT_DATA]++;
                if (bufsize == NBPG) {
                    m->m_ext.ext_buf = (caddr_t)mbigfree;	/* get the big cluster */
                    ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
                    ++mclrefcnt[mtocl(m->m_ext.ext_buf) + 1];
                    mbstat.m_bigclfree--;
                    mbigfree = ((union mbigcluster *)(m->m_ext.ext_buf))->mbc_next;
                    m->m_ext.ext_free = m_bigfree;
                    m->m_ext.ext_size = NBPG;
                } else {
                    m->m_ext.ext_buf = (caddr_t)mclfree;	/* get the cluster */
                    ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
                    mbstat.m_clfree--;
                    mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
                    m->m_ext.ext_free = 0;
                    m->m_ext.ext_size = MCLBYTES;
                }
                m->m_ext.ext_arg = 0;
                m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
                m->m_next = m->m_nextpkt = 0;
                m->m_type = MT_DATA;
                m->m_data = m->m_ext.ext_buf;
                m->m_len = 0;

                if (pkt == 0) {
                    m->m_flags = M_PKTHDR | M_EXT;
                    _M_CLEAR_PKTHDR(m);
                } else {
                    m->m_flags = M_EXT;
                }
            } else {
                MBUF_UNLOCK();

                if (pkt == 0) {
                    MGETHDR(m, how, MT_DATA);
                } else {
                    MGET(m, how, MT_DATA);
                }
                if (m == 0) {
                    m_freem(pkt);
                    goto fail;
                }
                if (bufsize <= MINCLSIZE) {
                    if (bufsize > MHLEN) {
                        MGET(m->m_next, how, MT_DATA);
                        if (m->m_next == 0) {
                            m_free(m);
                            m_freem(pkt);
                            goto fail;
                        }
                    }
                } else {
                    if (bufsize == NBPG)
                        m = m_mbigget(m, how);
                    else
                        m = m_mclget(m, how);
                    if ((m->m_flags & M_EXT) == 0) {
                        m_free(m);
                        m_freem(pkt);
                        goto fail;
                    }
                }
                MBUF_LOCK();
            }
            *nm = m;
            nm = &m->m_next;
        }
        *np = pkt;
        np = &pkt->m_nextpkt;
    }
    MBUF_UNLOCK();
    *num_needed = num;

    return top;
fail:
    if (wantall && top) {
        m_freem_list(top);
        return 0;
    }
    *num_needed = num;

    return top;
}
/* Best effort to get a mbuf cluster + pkthdr under one lock.
 * If we don't have them avail, just bail out and use the regular
 * path.
 * Used by drivers to allocate packets on receive ring.
 */
__private_extern__ struct mbuf *
m_getpacket_how(int how)
{
    unsigned int num_needed = 1;

    return m_getpackets_internal(&num_needed, 1, how, 1, MCLBYTES);
}

/* Best effort to get a mbuf cluster + pkthdr under one lock.
 * If we don't have them avail, just bail out and use the regular
 * path.
 * Used by drivers to allocate packets on receive ring.
 */
struct mbuf *
m_getpacket(void)
{
    unsigned int num_needed = 1;

    return m_getpackets_internal(&num_needed, 1, M_WAITOK, 1, MCLBYTES);
}
/*
 * return a list of mbuf hdrs that point to clusters...
 * try for num_needed, if this can't be met, return whatever
 * number were available... set up the first num_with_pkthdrs
 * with mbuf hdrs configured as packet headers... these are
 * chained on the m_nextpkt field... any packets requested beyond
 * this are chained onto the last packet header's m_next field.
 */
struct mbuf *
m_getpackets(int num_needed, int num_with_pkthdrs, int how)
{
    unsigned int n = num_needed;

    return m_getpackets_internal(&n, num_with_pkthdrs, how, 0, MCLBYTES);
}
/*
 * return a list of mbuf hdrs set up as packet hdrs
 * chained together on the m_nextpkt field
 */
struct mbuf *
m_getpackethdrs(int num_needed, int how)
{
    struct mbuf *m;
    struct mbuf **np, *top;

    top = NULL;
    np = &top;

    MBUF_LOCK();

    while (num_needed--) {
        m_range_check(mfree);
        m_range_check(mclfree);
        m_range_check(mbigfree);

        if ((m = mfree)) {	/* mbufs are available */
            MCHECK(m);
            mfree = m->m_next;
            ++mclrefcnt[mtocl(m)];
            mbstat.m_mtypes[MT_FREE]--;
            mbstat.m_mtypes[MT_DATA]++;

            m->m_next = m->m_nextpkt = 0;
            m->m_type = MT_DATA;
            m->m_flags = M_PKTHDR;
            m->m_len = 0;
            m->m_data = m->m_pktdat;
            _M_CLEAR_PKTHDR(m);

        } else {
            MBUF_UNLOCK();
            m = m_retryhdr(how, MT_DATA);
            if (m == 0)
                return (top);
            MBUF_LOCK();
        }
        *np = m;
        np = &m->m_nextpkt;
    }
    MBUF_UNLOCK();

    return (top);
}
/* free an mbuf list (m_nextpkt) while following m_next under one lock.
 * returns the count of mbuf packets freed. Used by the drivers.
 */
int
m_freem_list(struct mbuf *m)
{
    struct mbuf *nextpkt;
    int i, count = 0;

    MBUF_LOCK();

    while (m) {
        if (m)
            nextpkt = m->m_nextpkt;	/* chain of linked mbufs from driver */
        else
            nextpkt = 0;

        count++;

        while (m) {	/* free the mbuf chain (like mfreem) */

            struct mbuf *n;

            m_range_check(mfree);
            m_range_check(mclfree);
            m_range_check(mbigfree);

            /* Free the aux data if there is any */
            if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) {
                /*
                 * Treat the current m as the nextpkt and set m
                 * to the aux data. Preserve nextpkt in m->m_nextpkt.
                 * This lets us free the aux data in this loop
                 * without having to call m_freem recursively,
                 * which wouldn't work because we've still got
                 * the lock.
                 */
                m->m_nextpkt = nextpkt;
                nextpkt = m;
                m = nextpkt->m_pkthdr.aux;
                nextpkt->m_pkthdr.aux = NULL;
            }

            if ((m->m_flags & M_PKTHDR) != 0 && !SLIST_EMPTY(&m->m_pkthdr.tags)) {
                /* A quick (albeit inefficient) expedient */
                MBUF_UNLOCK();
                m_tag_delete_chain(m, NULL);
                MBUF_LOCK();
            }

            n = m->m_next;

            if (n && n->m_nextpkt)
                panic("m_freem_list: m_nextpkt of m_next != NULL");
            if (m->m_type == MT_FREE)
                panic("freeing free mbuf");

            if (m->m_flags & M_EXT) {
                if (MCLHASREFERENCE(m)) {
                    remque((queue_t)&m->m_ext.ext_refs);
                } else if (m->m_ext.ext_free == NULL) {
                    union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;

                    m_range_check(mcl);

                    if (_MCLUNREF(mcl)) {
                        mcl->mcl_next = mclfree;
                        mclfree = mcl;
                        ++mbstat.m_clfree;
                    }
                } else {
                    (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
                        m->m_ext.ext_size, m->m_ext.ext_arg);
                }
            }
            mbstat.m_mtypes[m->m_type]--;
            (void) _MCLUNREF(m);
            _MFREE_MUNGE(m);
            mbstat.m_mtypes[MT_FREE]++;
            m->m_type = MT_FREE;
            m->m_flags = 0;
            m->m_len = 0;
            m->m_next = mfree;
            mfree = m;
            m = n;
        }
        m = nextpkt;	/* bump m with saved nextpkt if any */
    }
    if ((i = m_want))
        m_want = 0;

    MBUF_UNLOCK();

    if (i)
        wakeup((caddr_t)&mfree);

    return (count);
}

void
m_freem(struct mbuf *m)
{
    while (m)
        m = m_free(m);
}
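/*
 * Illustrative sketch (not from the original source): a driver completing
 * a batch of transmitted packets can return the whole m_nextpkt list in a
 * single call instead of calling m_freem() once per packet.
 */
#if 0
static void
example_tx_complete(struct mbuf *done_list)
{
    int freed;

    freed = m_freem_list(done_list);	/* one lock pass for the batch */
    (void)freed;			/* count of packets released */
}
#endif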
/*
 * Mbuffer utility routines.
 */

/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 */
int
m_leadingspace(struct mbuf *m)
{
    if (m->m_flags & M_EXT) {
        if (MCLHASREFERENCE(m))
            return (0);
        return (m->m_data - m->m_ext.ext_buf);
    }
    if (m->m_flags & M_PKTHDR)
        return (m->m_data - m->m_pktdat);
    return (m->m_data - m->m_dat);
}

/*
 * Compute the amount of space available
 * after the end of data in an mbuf.
 */
int
m_trailingspace(struct mbuf *m)
{
    if (m->m_flags & M_EXT) {
        if (MCLHASREFERENCE(m))
            return (0);
        return (m->m_ext.ext_buf + m->m_ext.ext_size -
            (m->m_data + m->m_len));
    }
    return (&m->m_dat[MLEN] - (m->m_data + m->m_len));
}
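/*
 * Illustrative sketch (not from the original source): appending data in
 * place when the tail of the mbuf has room.  Both routines return 0 for
 * shared clusters, so writers never scribble on referenced data.
 */
#if 0
static int
example_append_inplace(struct mbuf *m, caddr_t p, int n)
{
    if (m_trailingspace(m) >= n) {
        bcopy(p, mtod(m, caddr_t) + m->m_len, n);
        m->m_len += n;
        return (1);
    }
    return (0);	/* caller must chain another mbuf instead */
}
#endif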
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 * Does not adjust packet header length.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
    struct mbuf *mn;

    MGET(mn, how, m->m_type);
    if (mn == (struct mbuf *)NULL) {
        m_freem(m);
        return ((struct mbuf *)NULL);
    }
    if (m->m_flags & M_PKTHDR) {
        M_COPY_PKTHDR(mn, m);
        m->m_flags &= ~M_PKTHDR;
    }
    mn->m_next = m;
    m = mn;
    if (len < MHLEN)
        MH_ALIGN(m, len);
    m->m_len = len;
    return (m);
}
/*
 * Replacement for old M_PREPEND macro:
 * allocate new mbuf to prepend to chain,
 * copy junk along, and adjust length.
 */
struct mbuf *
m_prepend_2(struct mbuf *m, int len, int how)
{
    if (M_LEADINGSPACE(m) >= len) {
        m->m_data -= len;
        m->m_len += len;
    } else {
        m = m_prepend(m, len, how);
    }
    if ((m) && (m->m_flags & M_PKTHDR))
        m->m_pkthdr.len += len;
    return (m);
}
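/*
 * Illustrative sketch (not from the original source): pushing a protocol
 * header onto an outbound packet.  EXAMPLE_HDR_LEN is hypothetical.
 */
#if 0
#define EXAMPLE_HDR_LEN 8	/* hypothetical header size */

static struct mbuf *
example_push_header(struct mbuf *m)
{
    m = m_prepend_2(m, EXAMPLE_HDR_LEN, M_DONTWAIT);
    if (m == NULL)
        return (NULL);	/* chain was freed by m_prepend() on failure */
    /* header space is now at mtod(m, ...) and m_pkthdr.len was adjusted */
    return (m);
}
#endif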
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
    struct mbuf *n, **np;
    int off = off0;
    struct mbuf *top;
    int copyhdr = 0;

    if (off < 0 || len < 0)
        panic("m_copym");
    if (off == 0 && m->m_flags & M_PKTHDR)
        copyhdr = 1;

    while (off >= m->m_len) {
        if (m == 0)
            panic("m_copym");
        off -= m->m_len;
        m = m->m_next;
    }
    np = &top;
    top = 0;

    MBUF_LOCK();

    while (len > 0) {
        m_range_check(mfree);
        m_range_check(mclfree);
        m_range_check(mbigfree);

        if (m == 0) {
            if (len != M_COPYALL)
                panic("m_copym");
            break;
        }
        if (mfree) {
            n = mfree;
            MCHECK(n);
            ++mclrefcnt[mtocl(n)];
            mbstat.m_mtypes[MT_FREE]--;
            mbstat.m_mtypes[m->m_type]++;
            mfree = n->m_next;
            n->m_next = n->m_nextpkt = 0;
            n->m_type = m->m_type;
            n->m_data = n->m_dat;
            n->m_flags = 0;
        } else {
            MBUF_UNLOCK();
            n = m_retry(wait, m->m_type);
            MBUF_LOCK();
        }
        *np = n;

        if (n == 0)
            goto nospace;
        if (copyhdr) {
            M_COPY_PKTHDR(n, m);
            if (len == M_COPYALL)
                n->m_pkthdr.len -= off0;
            else
                n->m_pkthdr.len = len;
            copyhdr = 0;
        }
        if (len == M_COPYALL) {
            if (min(len, (m->m_len - off)) == len) {
                printf("m->m_len %d - off %d = %d, %d\n",
                    m->m_len, off, m->m_len - off,
                    min(len, (m->m_len - off)));
            }
        }
        n->m_len = min(len, (m->m_len - off));
        if (n->m_len == M_COPYALL) {
            printf("n->m_len == M_COPYALL, fixing\n");
            n->m_len = MHLEN;
        }
        if (m->m_flags & M_EXT) {
            n->m_ext = m->m_ext;
            insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
            n->m_data = m->m_data + off;
            n->m_flags |= M_EXT;
        } else {
            bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
                (unsigned)n->m_len);
        }
        if (len != M_COPYALL)
            len -= n->m_len;
        off = 0;
        m = m->m_next;
        np = &n->m_next;
    }
    MBUF_UNLOCK();

    return (top);
nospace:
    MBUF_UNLOCK();

    m_freem(top);
    return (0);
}
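/*
 * Illustrative sketch (not from the original source): taking a read-only
 * copy of a whole packet, e.g. for a tap.  Cluster-backed data is shared
 * by reference (see the insque() above), not duplicated.
 */
#if 0
static struct mbuf *
example_snapshot(struct mbuf *m)
{
    return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}
#endif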
/*
 * equivalent to m_copym except that all necessary
 * mbuf hdrs are allocated within this routine
 * also, the last mbuf and offset accessed are passed
 * out and can be passed back in to avoid having to
 * rescan the entire mbuf list (normally hung off of the socket)
 */
struct mbuf *
m_copym_with_hdrs(struct mbuf *m, int off0, int len, int wait,
    struct mbuf **m_last, int *m_off)
{
    struct mbuf *n, **np = 0;
    int off = off0;
    struct mbuf *top = 0;
    int copyhdr = 0;
    int type;

    if (off == 0 && m->m_flags & M_PKTHDR)
        copyhdr = 1;

    if (*m_last) {
        m = *m_last;
        off = *m_off;
    } else {
        while (off >= m->m_len) {
            off -= m->m_len;
            m = m->m_next;
        }
    }

    MBUF_LOCK();

    while (len > 0) {
        m_range_check(mfree);
        m_range_check(mclfree);
        m_range_check(mbigfree);

        if (top == 0)
            type = MT_HEADER;
        else {
            if (m == 0)
                panic("m_gethdr_and_copym");
            type = m->m_type;
        }
        if (mfree) {
            n = mfree;
            MCHECK(n);
            ++mclrefcnt[mtocl(n)];
            mbstat.m_mtypes[MT_FREE]--;
            mbstat.m_mtypes[type]++;
            mfree = n->m_next;
            n->m_next = n->m_nextpkt = 0;
            n->m_type = type;

            if (top) {
                n->m_data = n->m_dat;
                n->m_flags = 0;
            } else {
                n->m_data = n->m_pktdat;
                n->m_flags = M_PKTHDR;
                _M_CLEAR_PKTHDR(n);
            }
        } else {
            MBUF_UNLOCK();
            if (top)
                n = m_retry(wait, type);
            else
                n = m_retryhdr(wait, type);
            MBUF_LOCK();
        }
        if (n == 0)
            goto nospace;
        if (top == 0) {
            top = n;
            np = &top->m_next;
        } else
            *np = n;

        if (copyhdr) {
            M_COPY_PKTHDR(n, m);
            n->m_pkthdr.len = len;
            copyhdr = 0;
        }
        n->m_len = min(len, (m->m_len - off));

        if (m->m_flags & M_EXT) {
            n->m_ext = m->m_ext;
            insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
            n->m_data = m->m_data + off;
            n->m_flags |= M_EXT;
        } else {
            bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
                (unsigned)n->m_len);
        }
        len -= n->m_len;

        if (len == 0) {
            if ((off + n->m_len) == m->m_len) {
                *m_last = m->m_next;
                *m_off  = 0;
            } else {
                *m_last = m;
                *m_off  = off + n->m_len;
            }
            break;
        }
        off = 0;
        m = m->m_next;
        np = &n->m_next;
    }
    MBUF_UNLOCK();

    return (top);
nospace:
    MBUF_UNLOCK();

    m_freem(top);
    return (0);
}
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
{
    unsigned count;

    if (off < 0 || len < 0)
        panic("m_copydata");
    while (off > 0) {
        if (m == 0)
            panic("m_copydata");
        if (off < m->m_len)
            break;
        off -= m->m_len;
        m = m->m_next;
    }
    while (len > 0) {
        if (m == 0)
            panic("m_copydata");
        count = min(m->m_len - off, len);
        bcopy(mtod(m, caddr_t) + off, cp, count);
        len -= count;
        cp += count;
        off = 0;
        m = m->m_next;
    }
}
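/*
 * Illustrative sketch (not from the original source): pulling a fixed-size
 * header into a stack buffer regardless of how the chain is fragmented.
 * "struct example_hdr" is hypothetical.
 */
#if 0
struct example_hdr { u_int16_t type; u_int16_t len; };

static void
example_read_hdr(struct mbuf *m, int off, struct example_hdr *h)
{
    /* caller must know off + sizeof(*h) bytes exist, or this panics */
    m_copydata(m, off, sizeof(*h), (caddr_t)h);
}
#endif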
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
    while (m->m_next)
        m = m->m_next;
    while (n) {
        if (m->m_flags & M_EXT ||
            m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
            /* just join the two chains */
            m->m_next = n;
            return;
        }
        /* splat the data from one into the other */
        bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
            (u_int)n->m_len);
        m->m_len += n->m_len;
        n = m_free(n);
    }
}
void
m_adj(struct mbuf *mp, int req_len)
{
    int len = req_len;
    struct mbuf *m;
    int count;

    if ((m = mp) == NULL)
        return;
    if (len >= 0) {
        /*
         * Trim from head.
         */
        while (m != NULL && len > 0) {
            if (m->m_len <= len) {
                len -= m->m_len;
                m->m_len = 0;
                m = m->m_next;
            } else {
                m->m_len -= len;
                m->m_data += len;
                len = 0;
            }
        }
        m = mp;
        if (m->m_flags & M_PKTHDR)
            m->m_pkthdr.len -= (req_len - len);
    } else {
        /*
         * Trim from tail.  Scan the mbuf chain,
         * calculating its length and finding the last mbuf.
         * If the adjustment only affects this mbuf, then just
         * adjust and return.  Otherwise, rescan and truncate
         * after the remaining size.
         */
        len = -len;
        count = 0;
        for (;;) {
            count += m->m_len;
            if (m->m_next == (struct mbuf *)0)
                break;
            m = m->m_next;
        }
        if (m->m_len >= len) {
            m->m_len -= len;
            m = mp;
            if (m->m_flags & M_PKTHDR)
                m->m_pkthdr.len -= len;
            return;
        }
        count -= len;
        if (count < 0)
            count = 0;
        /*
         * Correct length for chain is "count".
         * Find the mbuf with last data, adjust its length,
         * and toss data from remaining mbufs on chain.
         */
        m = mp;
        if (m->m_flags & M_PKTHDR)
            m->m_pkthdr.len = count;
        for (; m; m = m->m_next) {
            if (m->m_len >= count) {
                m->m_len = count;
                break;
            }
            count -= m->m_len;
        }
        while ((m = m->m_next))
            m->m_len = 0;
    }
}
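/*
 * Illustrative sketch (not from the original source): positive lengths trim
 * from the head of the chain, negative lengths from the tail.  The sizes
 * below are hypothetical.
 */
#if 0
static void
example_strip(struct mbuf *m)
{
    m_adj(m, 14);	/* drop a 14-byte link header from the front */
    m_adj(m, -4);	/* drop a 4-byte trailer (e.g. a CRC) from the end */
}
#endif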
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
    struct mbuf *m;
    int count;
    int space;

    /*
     * If first mbuf has no cluster, and has room for len bytes
     * without shifting current data, pullup into it,
     * otherwise allocate a new mbuf to prepend to the chain.
     */
    if ((n->m_flags & M_EXT) == 0 &&
        n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
        if (n->m_len >= len)
            return (n);
        m = n;
        n = n->m_next;
        len -= m->m_len;
    } else {
        if (len > MHLEN)
            goto bad;
        MGET(m, M_DONTWAIT, n->m_type);
        if (m == 0)
            goto bad;
        m->m_len = 0;
        if (n->m_flags & M_PKTHDR) {
            M_COPY_PKTHDR(m, n);
            n->m_flags &= ~M_PKTHDR;
        }
    }
    space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
    do {
        count = min(min(max(len, max_protohdr), space), n->m_len);
        bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
            (unsigned)count);
        len -= count;
        m->m_len += count;
        n->m_len -= count;
        space -= count;
        if (n->m_len)
            n->m_data += count;
        else
            n = m_free(n);
    } while (len > 0 && n);
    if (len > 0) {
        (void) m_free(m);
        goto bad;
    }
    m->m_next = n;
    return (m);
bad:
    m_freem(n);
    return (0);
}
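/*
 * Illustrative sketch (not from the original source): the classic input-path
 * idiom.  On failure the chain is already freed, so the caller just returns.
 * EXAMPLE_HLEN is hypothetical.
 */
#if 0
#define EXAMPLE_HLEN 20		/* hypothetical fixed header length */

static struct mbuf *
example_input(struct mbuf *m)
{
    if (m->m_len < EXAMPLE_HLEN &&
        (m = m_pullup(m, EXAMPLE_HLEN)) == NULL)
        return (NULL);	/* m_pullup freed the chain */
    /* mtod(m, ...) now points at EXAMPLE_HLEN contiguous bytes */
    return (m);
}
#endif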
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
    struct mbuf *m, *n;
    unsigned len = len0, remain;

    for (m = m0; m && len > m->m_len; m = m->m_next)
        len -= m->m_len;
    if (m == 0)
        return (0);
    remain = m->m_len - len;
    if (m0->m_flags & M_PKTHDR) {
        MGETHDR(n, wait, m0->m_type);
        if (n == 0)
            return (0);
        n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
        n->m_pkthdr.len = m0->m_pkthdr.len - len0;
        m0->m_pkthdr.len = len0;
        if (m->m_flags & M_EXT)
            goto extpacket;
        if (remain > MHLEN) {
            /* m can't be the lead packet */
            MH_ALIGN(n, 0);
            n->m_next = m_split(m, len, wait);
            if (n->m_next == 0) {
                (void) m_free(n);
                return (0);
            } else
                return (n);
        } else
            MH_ALIGN(n, remain);
    } else if (remain == 0) {
        n = m->m_next;
        m->m_next = 0;
        return (n);
    } else {
        MGET(n, wait, m->m_type);
        if (n == 0)
            return (0);
        M_ALIGN(n, remain);
    }
extpacket:
    if (m->m_flags & M_EXT) {
        n->m_flags |= M_EXT;
        MBUF_LOCK();
        n->m_ext = m->m_ext;
        insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
        MBUF_UNLOCK();
        n->m_data = m->m_data + len;
    } else {
        bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
    }
    n->m_len = remain;
    m->m_len = len;
    n->m_next = m->m_next;
    m->m_next = 0;
    return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
    void (*copy)(const void *, void *, size_t))
{
    struct mbuf *m;
    struct mbuf *top = 0, **mp = &top;
    int off = off0, len;
    char *cp;
    char *epkt;

    cp = buf;
    epkt = cp + totlen;
    if (off) {
        /*
         * If 'off' is non-zero, packet is trailer-encapsulated,
         * so we have to skip the type and length fields.
         */
        cp += off + 2 * sizeof(u_int16_t);
        totlen -= 2 * sizeof(u_int16_t);
    }
    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == 0)
        return (0);
    m->m_pkthdr.rcvif = ifp;
    m->m_pkthdr.len = totlen;
    m->m_len = MHLEN;

    while (totlen > 0) {
        if (top) {
            MGET(m, M_DONTWAIT, MT_DATA);
            if (m == 0) {
                m_freem(top);
                return (0);
            }
            m->m_len = MLEN;
        }
        len = min(totlen, epkt - cp);
        if (len >= MINCLSIZE) {
            MCLGET(m, M_DONTWAIT);
            if (m->m_flags & M_EXT)
                m->m_len = len = min(len, MCLBYTES);
            else {
                /* give up when it's out of cluster mbufs */
                if (top)
                    m_freem(top);
                m_freem(m);
                return (0);
            }
        } else {
            /*
             * Place initial small packet/header at end of mbuf.
             */
            if (len < m->m_len) {
                if (top == 0 && len + max_linkhdr <= m->m_len)
                    m->m_data += max_linkhdr;
                m->m_len = len;
            } else
                len = m->m_len;
        }
        if (copy)
            copy(cp, mtod(m, caddr_t), (unsigned)len);
        else
            bcopy(cp, mtod(m, caddr_t), (unsigned)len);
        cp += len;
        *mp = m;
        mp = &m->m_next;
        totlen -= len;
        if (cp == epkt)
            cp = buf;
    }
    return (top);
}
/*
 * Cluster freelist allocation check. The mbuf lock must be held.
 * Ensure hysteresis between hi/lo.
 */
static int
m_howmany(int num, size_t bufsize)
{
    int i = 0;

    /* Bail if we've maxed out the mbuf memory map */
    if (mbstat.m_clusters + (mbstat.m_bigclusters << 1) < nmbclusters) {
        int j = 0;

        if (bufsize == MCLBYTES) {
            /* Under minimum */
            if (mbstat.m_clusters < MINCL)
                return (MINCL - mbstat.m_clusters);
            /* Too few (free < 1/2 total) and not over maximum */
            if (mbstat.m_clusters < (nmbclusters >> 1)) {
                if (num >= mbstat.m_clfree)
                    i = num - mbstat.m_clfree;
                if (((mbstat.m_clusters + num) >> 1) > mbstat.m_clfree)
                    j = ((mbstat.m_clusters + num) >> 1) - mbstat.m_clfree;
                i = max(i, j);
                if (i + mbstat.m_clusters >= (nmbclusters >> 1))
                    i = (nmbclusters >> 1) - mbstat.m_clusters;
            }
        } else {
            /* Under minimum */
            if (mbstat.m_bigclusters < MINCL)
                return (MINCL - mbstat.m_bigclusters);
            /* Too few (free < 1/2 total) and not over maximum */
            if (mbstat.m_bigclusters < (nmbclusters >> 2)) {
                if (num >= mbstat.m_bigclfree)
                    i = num - mbstat.m_bigclfree;
                if (((mbstat.m_bigclusters + num) >> 1) > mbstat.m_bigclfree)
                    j = ((mbstat.m_bigclusters + num) >> 1) - mbstat.m_bigclfree;
                i = max(i, j);
                if (i + mbstat.m_bigclusters >= (nmbclusters >> 2))
                    i = (nmbclusters >> 2) - mbstat.m_bigclusters;
            }
        }
    }
    return i;
}
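/*
 * Worked example (not from the original source): suppose nmbclusters is
 * 2048, with mbstat.m_clusters = 600 and mbstat.m_clfree = 20, few big
 * clusters (so the map check passes), and a caller asks for num = 50
 * regular clusters.  The pool is under half the map (600 < 1024), so:
 *
 *	i = num - m_clfree                       = 50 - 20  = 30
 *	j = ((m_clusters + num) >> 1) - m_clfree = 325 - 20 = 305
 *	i = max(i, j)                                       = 305
 *
 * 305 + 600 < 1024, so no cap applies and m_howmany() asks for 305 more
 * clusters -- enough that roughly half of the resulting pool is free,
 * which is the hysteresis the comment above refers to.
 */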
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
    int mlen;
    struct mbuf *m = m0, *n;
    int totlen = 0;

    if (m0 == 0)
        return;
    while (off > (mlen = m->m_len)) {
        off -= mlen;
        totlen += mlen;
        if (m->m_next == 0) {
            n = m_getclr(M_DONTWAIT, m->m_type);
            if (n == 0)
                goto out;
            n->m_len = min(MLEN, len + off);
            m->m_next = n;
        }
        m = m->m_next;
    }
    while (len > 0) {
        mlen = min (m->m_len - off, len);
        bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
        cp += mlen;
        len -= mlen;
        mlen += off;
        off = 0;
        totlen += mlen;
        if (len == 0)
            break;
        if (m->m_next == 0) {
            n = m_get(M_DONTWAIT, m->m_type);
            if (n == 0)
                break;
            n->m_len = min(MLEN, len);
            m->m_next = n;
        }
        m = m->m_next;
    }
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
        m->m_pkthdr.len = totlen;
}
char *mcl_to_paddr(char *addr) {
    int base_phys;

    if (addr < (char *)mbutl || addr >= (char *)embutl)
        return (0);
    base_phys = mcl_paddr[(addr - (char *)mbutl) >> PGSHIFT];

    if (base_phys == 0)
        return (0);
    return ((char *)((int)base_phys | ((int)addr & PGOFSET)));
}
/*
 * Dup the mbuf chain passed in.  The whole thing.  No cute additional cruft.
 * And really copy the thing.  That way, we don't "precompute" checksums
 * for unsuspecting consumers.
 * Assumption: m->m_nextpkt == 0.
 * Trick: for small packets, don't dup into a cluster.  That way received
 * packets don't take up too much room in the sockbuf (cf. sbspace()).
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
    struct mbuf *n, **np;
    struct mbuf *top;
    int copyhdr = 0;

    np = &top;
    top = 0;
    if (m->m_flags & M_PKTHDR)
        copyhdr = 1;

    /*
     * Quick check: if we have one mbuf and its data fits in an
     * mbuf with packet header, just copy and go.
     */
    if (m->m_next == NULL)
    {	/* Then just move the data into an mbuf and be done... */
        if (copyhdr)
        {	if (m->m_pkthdr.len <= MHLEN)
            {	if ((n = m_gethdr(how, m->m_type)) == NULL)
                    return (NULL);
                n->m_len = m->m_len;
                m_dup_pkthdr(n, m, how);
                bcopy(m->m_data, n->m_data, m->m_len);
                return (n);
            }
        } else if (m->m_len <= MLEN)
        {	if ((n = m_get(how, m->m_type)) == NULL)
                return (NULL);
            bcopy(m->m_data, n->m_data, m->m_len);
            n->m_len = m->m_len;
            return (n);
        }
    }
    while (m)
    {
#if BLUE_DEBUG
        kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
            m->m_data);
#endif
        if (copyhdr)
            n = m_gethdr(how, m->m_type);
        else
            n = m_get(how, m->m_type);
        if (n == 0)
            goto nospace;
        if (m->m_flags & M_EXT)
        {	MCLGET(n, how);
            if ((n->m_flags & M_EXT) == 0)
                goto nospace;
        }
        *np = n;
        if (copyhdr)
        {	/* Don't use M_COPY_PKTHDR: preserve m_data */
            m_dup_pkthdr(n, m, how);
            copyhdr = 0;
            if ((n->m_flags & M_EXT) == 0)
                n->m_data = n->m_pktdat;
        }
        n->m_len = m->m_len;
        /*
         * Get the dup on the same bdry as the original
         * Assume that the two mbufs have the same offset to data area
         * (up to word bdries)
         */
        bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len);
        m = m->m_next;
        np = &n->m_next;
#if BLUE_DEBUG
        kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
            n->m_data);
#endif
    }

    return (top);
nospace:
    m_freem(top);
    return (0);
}
int
m_mclref(struct mbuf *p)
{
    return (_MCLREF(p));
}

int
m_mclunref(struct mbuf *p)
{
    return (_MCLUNREF(p));
}

/* change mbuf to new type */
void
m_mchtype(struct mbuf *m, int t)
{
    MBUF_LOCK();
    mbstat.m_mtypes[(m)->m_type]--;
    mbstat.m_mtypes[t]++;
    (m)->m_type = t;
    MBUF_UNLOCK();
}

void *m_mtod(struct mbuf *m)
{
    return ((m)->m_data);
}

struct mbuf *m_dtom(void *x)
{
    return ((struct mbuf *)((u_long)(x) & ~(MSIZE-1)));
}

int m_mtocl(void *x)
{
    return (((char *)(x) - (char *)mbutl) / sizeof(union mcluster));
}

union mcluster *m_cltom(int x)
{
    return ((union mcluster *)(mbutl + (x)));
}

void m_mcheck(struct mbuf *m)
{
    if (m->m_type != MT_FREE)
        panic("mget MCHECK: m_type=%x m=%x", m->m_type, m);
}
static void
mbuf_expand_thread(void)
{
    while (1) {
        MBUF_LOCK();
        if (mbuf_expand_mcl) {
            int n;

            /* Adjust to the current number of clusters in use */
            n = mbuf_expand_mcl - (mbstat.m_clusters - mbstat.m_clfree);
            mbuf_expand_mcl = 0;

            if (n > 0)
                (void)m_clalloc(n, M_WAIT, MCLBYTES, 1);
        }
        if (mbuf_expand_big) {
            int n;

            /* Adjust to the current number of 4 KB clusters in use */
            n = mbuf_expand_big - (mbstat.m_bigclusters - mbstat.m_bigclfree);
            mbuf_expand_big = 0;

            if (n > 0)
                (void)m_clalloc(n, M_WAIT, NBPG, 1);
        }
        MBUF_UNLOCK();
        /*
         * Because we can run out of memory before filling the mbuf map, we
         * should not allocate more clusters than there are mbufs -- otherwise
         * we could have a large number of useless clusters allocated.
         */
        while (mbstat.m_mbufs < mbstat.m_bigclusters + mbstat.m_clusters) {
            if (m_expand(M_WAIT) == 0)
                break;
        }

        assert_wait(&mbuf_expand_thread_wakeup, THREAD_UNINT);
        (void) thread_block((thread_continue_t)mbuf_expand_thread);
    }
}
static void
mbuf_expand_thread_init(void)
{
    mbuf_expand_thread_initialized++;
    mbuf_expand_thread();
}
SYSCTL_DECL(_kern_ipc);
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");