/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
/* HISTORY
 *
 *	10/15/97 Annette DeSchon (deschon@apple.com)
 *		Fixed bug in which all cluster mbufs were broken up
 *		into regular mbufs: Some clusters are now reserved.
 *		When a cluster is needed, regular mbufs are no longer
 *		used.  (Radar 1683621)
 *	20-May-95 Mac Gillon (mgillon) at NeXT
 *		New version based on 4.4
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <net/netisr.h>

#include <kern/queue.h>
extern kernel_pmap;		/* The kernel's pmap */

decl_simple_lock_data(, mbuf_slock);
struct mbuf	*mfree;		/* mbuf free list */
struct mbuf	*mfreelater;	/* mbuf deallocation list */
extern vm_map_t	mb_map;		/* special map */
int		m_want;		/* sleepers on mbufs */
extern int	nmbclusters;	/* max number of mapped clusters */
short		*mclrefcnt;	/* mapped cluster reference counts */
int		*mcl_paddr;	/* physical page addresses of mapped clusters */
union mcluster	*mclfree;	/* mapped cluster free list */
int		max_linkhdr;	/* largest link-level header */
int		max_protohdr;	/* largest protocol header */
int		max_hdr;	/* largest link+protocol header */
int		max_datalen;	/* MHLEN - max_hdr */
struct mbstat	mbstat;		/* statistics */
union mcluster	*mbutl;		/* first mapped cluster address */
union mcluster	*embutl;	/* ending virtual address of mclusters */

static int	nclpp;		/* # clusters per physical page */
static char	mbfail[] = "mbuf not mapped";

static int m_howmany();

/* The number of cluster mbufs that are allocated, to start. */
#define MINCL	max(16, 2)

extern int dlil_input_thread_wakeup;
extern int dlil_expand_mcl;
extern int dlil_initialized;
	nclpp = round_page(MCLBYTES) / MCLBYTES;	/* see mbufgc() */
	if (nclpp < 1) nclpp = 1;

//	NETISR_LOCKINIT();
	if (nmbclusters == 0)
		nmbclusters = NMBCLUSTERS;
	MALLOC(mclrefcnt, short *, nmbclusters * sizeof (short),

	for (m = 0; m < nmbclusters; m++)

	MALLOC(mcl_paddr, int *, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int),

	bzero((char *)mcl_paddr, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int));

	embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES));

	PE_parse_boot_arg("initmcl", &initmcl);

	if (m_clalloc(max(PAGE_SIZE/CLBYTES, 1) * initmcl, M_WAIT) == 0)
/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 */
m_clalloc(ncl, nowait)
	register union mcluster *mcl;
	static char doing_alloc;

	/*
	 * Honor the caller's wish to block or not block.
	 * We have a way to grow the pool asynchronously,
	 * by kicking the dlil_input_thread.
	 */
	if ((i = m_howmany()) <= 0)

	if ((nowait == M_DONTWAIT))

	size = round_page(ncl * MCLBYTES);
	mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);

	if (mcl == 0 && ncl > 1) {
		size = round_page(MCLBYTES);	/* Try for 1 if failed */
		mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);

	ncl = size / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		if (++mclrefcnt[mtocl(mcl)] != 0)
			panic("m_clalloc already there");
		if (((int)mcl & PAGE_MASK) == 0)
			mcl_paddr[((char *)mcl - (char *)mbutl)/PAGE_SIZE] = pmap_extract(kernel_pmap, (char *)mcl);

		mcl->mcl_next = mclfree;

	mbstat.m_clfree += ncl;
	mbstat.m_clusters += ncl;
	/*
	 * When non-blocking we kick the dlil thread if we have to grow the
	 * pool or if the number of free clusters is less than requested.
	 */
	if ((nowait == M_DONTWAIT) && (i > 0 || ncl >= mbstat.m_clfree)) {
		if (dlil_initialized)
			wakeup((caddr_t)&dlil_input_thread_wakeup);

	if (mbstat.m_clfree >= ncl)
/*
 * Add more free mbufs by cutting up a cluster.
 */
	register caddr_t mcl;

	if (mbstat.m_clfree < (mbstat.m_clusters >> 4))
		/* 1/16th of the total number of cluster mbufs allocated is
		   reserved for large packets.  The number reserved must
		   always be < 1/2, or future allocation will be prevented.
		*/

	MCLALLOC(mcl, canwait);
		register struct mbuf *m = (struct mbuf *)mcl;
		register int i = NMBPCL;

		mbstat.m_mtypes[MT_FREE] += i;

	if (i) wakeup((caddr_t)&mfree);
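
/*
 * Illustrative sketch, not part of the build: the reservation check above in
 * plain numbers.  With the shift by 4, a cluster is only carved into small
 * mbufs while more than 1/16 of all allocated clusters remain free; as the
 * comment notes, the reserve must stay below 1/2 of the pool or the mbuf
 * free list could never be refilled.  The figures below are made up.
 */
#if 0
	/* e.g. 256 clusters allocated, 12 free: 12 < (256 >> 4) == 16,
	 * so the free clusters stay reserved for large packets and
	 * m_expand() declines to break one up. */
	if (mbstat.m_clfree < (mbstat.m_clusters >> 4))
		return (0);
#endif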
/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
m_retry(canwait, type)
#define m_retry(h, t)	0
	register struct mbuf *m;
	boolean_t funnel_state;

	(void) m_expand(canwait);

	if (m || canwait == M_DONTWAIT)

	if (dlil_initialized)
		wakeup((caddr_t)&dlil_input_thread_wakeup);

	assert_wait((caddr_t)&mfree, THREAD_UNINT);
	/*
	 * Grab the network funnel because m_reclaim calls into the
	 * socket domains and tsleep ends up calling splhigh.
	 */
	fnl = thread_funnel_get();
	if (fnl && (fnl == kernel_flock)) {
		thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

		funnel_state = thread_funnel_set(network_flock, TRUE);

	/* Sleep with a small timeout as insurance */
	(void) tsleep((caddr_t)0, PZERO-1, "m_retry", hz);

		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

		thread_funnel_set(network_flock, funnel_state);
/*
 * As above; retry an MGETHDR.
 */
m_retryhdr(canwait, type)
	register struct mbuf *m;

	if (m = m_retry(canwait, type)) {
		m->m_flags |= M_PKTHDR;
		m->m_data = m->m_pktdat;
	register struct domain *dp;
	register struct protosw *pr;

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr; pr = pr->pr_next)
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
	register struct mbuf *m;

	MGET(m, nowait, type);

m_gethdr(nowait, type)
	register struct mbuf *m;

	MGETHDR(m, nowait, type);

m_getclr(nowait, type)
	register struct mbuf *m;

	MGET(m, nowait, type);
	bzero(mtod(m, caddr_t), MLEN);
	struct mbuf *n = m->m_next;

	if (m->m_type == MT_FREE)
		panic("freeing free mbuf");

	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m)) {
			remque((queue_t)&m->m_ext.ext_refs);
		} else if (m->m_ext.ext_free == NULL) {
			union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;

				mcl->mcl_next = mclfree;
/* *** Since m_split() increments "mclrefcnt[mtocl(m->m_ext.ext_buf)]",
       and AppleTalk ADSP uses m_split(), this incorrect sanity check
       caused a panic.
*** */
			else	/* sanity check - not referenced this way */
				panic("m_free m_ext cluster not free");
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_size, m->m_ext.ext_arg);

	mbstat.m_mtypes[m->m_type]--;

	mbstat.m_mtypes[m->m_type]++;

	if (i) wakeup((caddr_t)&mfree);
/* Best effort to get an mbuf cluster + pkthdr under one lock.
 * If we don't have them avail, just bail out and use the regular
 * path.
 * Used by drivers to allocate packets on the receive ring.
 */
	m_clalloc(1, M_DONTWAIT);		/* takes the MBUF_LOCK, but doesn't release it... */
	if ((mfree != 0) && (mclfree != 0)) {	/* mbuf + cluster are available */

		++mclrefcnt[mtocl(m)];
		mbstat.m_mtypes[MT_FREE]--;
		mbstat.m_mtypes[MT_DATA]++;
		m->m_ext.ext_buf = (caddr_t)mclfree;	/* get the cluster */
		++mclrefcnt[mtocl(m->m_ext.ext_buf)];

		mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;

		m->m_next = m->m_nextpkt = 0;
		m->m_ext.ext_free = 0;

		m->m_data = m->m_ext.ext_buf;
		m->m_flags = M_PKTHDR | M_EXT;
		m->m_pkthdr.aux = (struct mbuf *)NULL;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_ext.ext_size = MCLBYTES;
		m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
		    &m->m_ext.ext_refs;

	else {	/* slow path: either mbuf or cluster needs to be allocated anyway */
		MGETHDR(m, M_WAITOK, MT_DATA);

		MCLGET( m, M_WAITOK );
		if ( ( m->m_flags & M_EXT ) == 0 )
/* Free an mbuf list (m_nextpkt) while following m_next under one lock.
 * Returns the count of mbufs freed.  Used by the drivers.
 */
	struct mbuf *nextpkt;

		nextpkt = m->m_nextpkt;		/* chain of linked mbufs from driver */

		while (m) {			/* free the mbuf chain (like m_freem) */
			struct mbuf *n = m->m_next;

			if (n && n->m_nextpkt)
				panic("m_freem_list: m_nextpkt of m_next != NULL");
			if (m->m_type == MT_FREE)
				panic("freeing free mbuf");

			if (m->m_flags & M_EXT) {
				if (MCLHASREFERENCE(m)) {
					remque((queue_t)&m->m_ext.ext_refs);
				} else if (m->m_ext.ext_free == NULL) {
					union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;

						mcl->mcl_next = mclfree;

					(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
					    m->m_ext.ext_size, m->m_ext.ext_arg);

			mbstat.m_mtypes[m->m_type]--;

			mbstat.m_mtypes[m->m_type]++;

		m = nextpkt;	/* bump m with saved nextpkt if any */

	if (i) wakeup((caddr_t)&mfree);
	register struct mbuf *m;

/*
 * Mbuffer utility routines.
 */

/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 */
	register struct mbuf *m;

	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))

		return (m->m_data - m->m_ext.ext_buf);

	if (m->m_flags & M_PKTHDR)
		return (m->m_data - m->m_pktdat);
	return (m->m_data - m->m_dat);

/*
 * Compute the amount of space available
 * after the end of data in an mbuf.
 */
	register struct mbuf *m;

	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))

		return (m->m_ext.ext_buf + m->m_ext.ext_size -
		    (m->m_data + m->m_len));

	return (&m->m_dat[MLEN] - (m->m_data + m->m_len));
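
/*
 * Illustrative sketch, not part of the build: a common caller-side use of
 * the space computations above -- append "n" bytes in place when the mbuf
 * still has room after its data instead of chaining another mbuf.
 * M_TRAILINGSPACE() is the usual macro form of the computation; "src" and
 * "n" are hypothetical.
 */
#if 0
	if (M_TRAILINGSPACE(m) >= n) {
		bcopy(src, mtod(m, caddr_t) + m->m_len, n);	/* copy after the existing data */
		m->m_len += n;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len += n;			/* keep the packet-header total in sync */
	}
#endif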
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
m_prepend(m, len, how)
	register struct mbuf *m;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		return ((struct mbuf *)NULL);

	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
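
/*
 * Illustrative sketch, not part of the build: the usual caller-side pattern.
 * The M_PREPEND() macro grows the front of the existing mbuf in place when
 * there is leading room and only falls back to the m_prepend() path above
 * when there is not; on failure the chain is freed and m is left NULL.
 * "struct hypothetical_hdr" and "hp" are made up for illustration.
 */
#if 0
	M_PREPEND(m, sizeof (struct hypothetical_hdr), M_DONTWAIT);
	if (m == NULL)
		return;					/* out of mbufs; chain already freed */
	hp = mtod(m, struct hypothetical_hdr *);	/* new header sits at the front */
#endif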
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
m_copym(m, off0, len, wait)
	register struct mbuf *m;

	register struct mbuf *n, **np;
	register int off = off0;

	if (off < 0 || len < 0)

	if (off == 0 && m->m_flags & M_PKTHDR)

		if (len != M_COPYALL)

		MGET(n, wait, m->m_type);

			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;

		if (len == M_COPYALL) {
			if (min(len, (m->m_len - off)) == len) {
				printf("m->m_len %d - off %d = %d, %d\n",
				       m->m_len, off, m->m_len - off,
				       min(len, (m->m_len - off)));

		n->m_len = min(len, (m->m_len - off));
		if (n->m_len == M_COPYALL) {
			printf("n->m_len == M_COPYALL, fixing\n");

		if (m->m_flags & M_EXT) {
			insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);

			n->m_data = m->m_data + off;

			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),

		if (len != M_COPYALL)
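
/*
 * Illustrative sketch, not part of the build: typical m_copym() callers.
 * When the source has cluster (M_EXT) storage the copy shares it by linking
 * onto ext_refs (the insque() above), so the original chain is left intact.
 * "hdrlen" and the "dropit" label are hypothetical.
 */
#if 0
	struct mbuf *whole, *head;

	whole = m_copym(m, 0, M_COPYALL, M_DONTWAIT);	/* duplicate the entire chain */
	if (whole == NULL)
		goto dropit;

	head = m_copym(m, 0, hdrlen, M_WAIT);		/* or just the first hdrlen bytes */
#endif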
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void m_copydata(m, off, len, cp)
	register struct mbuf *m;

	register unsigned count;

	if (off < 0 || len < 0)

		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
	register struct mbuf *m, *n;

		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */

		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,

		m->m_len += n->m_len;
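
/*
 * Illustrative sketch, not part of the build: m_cat() appends chain n to
 * chain m, either by copying n's bytes into m's own storage or by linking
 * the chains, and consumes n either way.  Per the comment above it never
 * touches m_pkthdr.len, so a caller keeping a packet header fixes the total
 * itself; "nlen" is hypothetical.
 */
#if 0
	int nlen = n->m_len;		/* length being appended (single-mbuf case) */

	m_cat(m, n);			/* n must not be freed again by the caller */
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len += nlen;
#endif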
	register int len = req_len;
	register struct mbuf *m;

	if ((m = mp) == NULL)

		while (m != NULL && len > 0) {
			if (m->m_len <= len) {

		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);

		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
			if (m->m_next == (struct mbuf *)0)

		if (m->m_len >= len) {

			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;

		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {

		while (m = m->m_next)
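
/*
 * Illustrative sketch, not part of the build: m_adj(mp, req_len) trims
 * req_len bytes from the front of the chain when req_len is positive and
 * from the tail when it is negative, which is what the two halves above
 * implement.  "hdrlen" and "padlen" are hypothetical.
 */
#if 0
	m_adj(m, hdrlen);	/* strip a parsed header from the front */
	m_adj(m, -padlen);	/* drop trailing padding from the end */
#endif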
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
	register struct mbuf *n;
	register struct mbuf *m;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {

		MGET(m, M_DONTWAIT, n->m_type);

		if (n->m_flags & M_PKTHDR) {
			n->m_flags &= ~M_PKTHDR;

		space = &m->m_dat[MLEN] - (m->m_data + m->m_len);

		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,

	} while (len > 0 && n);
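
/*
 * Illustrative sketch, not part of the build: the classic m_pullup() pattern
 * for header parsing, relying on the contiguity guarantee described above.
 * "struct ip" stands in for whatever header the caller wants to overlay with
 * mtod(); "ip" is hypothetical.
 */
#if 0
	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL)
		return;				/* m_pullup freed the chain on failure */
	ip = mtod(m, struct ip *);		/* the header bytes are now contiguous */
#endif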
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
m_split(m0, len0, wait)
	register struct mbuf *m0;

	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)

	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);

		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)

		if (remain > MHLEN) {
			/* m can't be the lead packet */

			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {

	} else if (remain == 0) {

		MGET(n, wait, m->m_type);

	if (m->m_flags & M_EXT) {
		insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);

		n->m_data = m->m_data + len;

		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);

	n->m_next = m->m_next;
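
/*
 * Illustrative sketch, not part of the build: splitting a chain.  Afterwards
 * the original chain holds the first len0 bytes and the returned chain holds
 * the remainder, with packet-header lengths adjusted as in the code above.
 * "len0" and the "dropit" label are hypothetical.
 */
#if 0
	struct mbuf *tail;

	tail = m_split(m, len0, M_DONTWAIT);
	if (tail == NULL)
		goto dropit;		/* chain was left as it was on failure */
#endif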
/*
 * Routine to copy from device local memory into mbufs.
 */
m_devget(buf, totlen, off0, ifp, copy)
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;

	/*
	 * If 'off' is non-zero, packet is trailer-encapsulated,
	 * so we have to skip the type and length fields.
	 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;

	while (totlen > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);

		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);

				/* give up when it's out of cluster mbufs */

			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;

			copy(cp, mtod(m, caddr_t), (unsigned)len);

			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
/*
 * Cluster freelist allocation check.  The mbuf lock must be held.
 * Ensure hysteresis between hi/lo.
 */
	if (mbstat.m_clusters < MINCL)
		return (MINCL - mbstat.m_clusters);
	/* Too few (free < 1/2 total) and not over maximum */
	if (mbstat.m_clusters < nmbclusters &&
	    (i = ((mbstat.m_clusters >> 1) - mbstat.m_clfree)) > 0)
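
/*
 * Illustrative worked example, not part of the build, of the watermarks
 * above with made-up numbers: 512 clusters allocated, 200 free, and
 * nmbclusters = 1024.  Free (200) is below half of the total (256), so the
 * check asks for 256 - 200 = 56 more clusters; once the free count rises
 * past half of the total it asks for nothing, which provides the hysteresis
 * the comment mentions.
 */
#if 0
	i = (mbstat.m_clusters >> 1) - mbstat.m_clfree;	/* (512 >> 1) - 200 = 56 */
#endif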
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
m_copyback(m0, off, len, cp)
	register struct mbuf *m = m0, *n;

	while (off > (mlen = m->m_len)) {

		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);

			n->m_len = min(MLEN, len + off);

		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);

		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);

			n->m_len = min(MLEN, len);

out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
char *mcl_to_paddr(register char *addr) {
	register int base_phys;

	if (addr < (char *)mbutl || addr >= (char *)embutl)

	base_phys = mcl_paddr[(addr - (char *)mbutl) >> PAGE_SHIFT];

	return ((char *)((int)base_phys | ((int)addr & PAGE_MASK)));
/*
 * Dup the mbuf chain passed in.  The whole thing.  No cute additional cruft.
 * And really copy the thing.  That way, we don't "precompute" checksums
 * for unsuspecting consumers.
 * Assumption: m->m_nextpkt == 0.
 * Trick: for small packets, don't dup into a cluster.  That way received
 * packets don't take up too much room in the sockbuf (cf. sbspace()).
 */
m_dup(register struct mbuf *m, int how)
{	register struct mbuf *n, **np;

	if (m->m_flags & M_PKTHDR)

	/*
	 * Quick check: if we have one mbuf and its data fits in an
	 * mbuf with packet header, just copy and go.
	 */
	if (m->m_next == NULL)
	{	/* Then just move the data into an mbuf and be done... */
		if (m->m_pkthdr.len <= MHLEN)
		{	if ((n = m_gethdr(how, m->m_type)) == NULL)

			n->m_len = m->m_len;
			n->m_flags |= (m->m_flags & M_COPYFLAGS);
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_pkthdr.rcvif = m->m_pkthdr.rcvif;
			n->m_pkthdr.header = NULL;
			n->m_pkthdr.aux = NULL;
			bcopy(m->m_data, n->m_data, m->m_pkthdr.len);

		} else if (m->m_len <= MLEN)
		{	if ((n = m_get(how, m->m_type)) == NULL)

			bcopy(m->m_data, n->m_data, m->m_len);
			n->m_len = m->m_len;

	kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,

			n = m_gethdr(how, m->m_type);

			n = m_get(how, m->m_type);

		if (m->m_flags & M_EXT)

			if ((n->m_flags & M_EXT) == 0)

		{	/* Don't use M_COPY_PKTHDR: preserve m_data */
			n->m_pkthdr = m->m_pkthdr;
			n->m_flags |= (m->m_flags & M_COPYFLAGS);

			if ((n->m_flags & M_EXT) == 0)
				n->m_data = n->m_pktdat;

		n->m_len = m->m_len;
		/*
		 * Get the dup on the same bdry as the original
		 * Assume that the two mbufs have the same offset to data area
		 * (up to word bdries)
		 */
		bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len);

	kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
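
/*
 * Illustrative sketch, not part of the build: m_dup() versus m_copym().
 * Because m_dup() really copies the bytes (per the comment above), the
 * caller may scribble on the duplicate without disturbing the original,
 * whereas m_copym() would have shared any cluster storage.  The "dropit"
 * label is hypothetical.
 */
#if 0
	struct mbuf *priv;

	priv = m_dup(m, M_DONTWAIT);	/* deep copy; small packets stay out of clusters */
	if (priv == NULL)
		goto dropit;
#endif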
#include <sys/sysctl.h>

static int mhog_num = 0;
static struct mbuf *mhog_chain = 0;
static int mhog_wait = 1;

sysctl_mhog_num SYSCTL_HANDLER_ARGS

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error && req->newptr) {

		m_freem(mhog_chain);

		for (i = 0; i < mhog_num; i++) {
			MGETHDR(m, mhog_wait ? M_WAIT : M_DONTWAIT, MT_DATA);

			MCLGET(m, mhog_wait ? M_WAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {

			m->m_next = mhog_chain;

SYSCTL_NODE(_kern_ipc, OID_AUTO, mhog, CTLFLAG_RW, 0, "mbuf hog");

SYSCTL_PROC(_kern_ipc_mhog, OID_AUTO, cluster, CTLTYPE_INT|CTLFLAG_RW,
	    &mhog_num, 0, &sysctl_mhog_num, "I", "");
SYSCTL_INT(_kern_ipc_mhog, OID_AUTO, wait, CTLFLAG_RW, &mhog_wait,