/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
/* HISTORY
 *
 *  10/15/97 Annette DeSchon (deschon@apple.com)
 *	Fixed bug in which all cluster mbufs were broken up
 *	into regular mbufs: Some clusters are now reserved.
 *	When a cluster is needed, regular mbufs are no longer
 *	used.  (Radar 1683621)
 *  20-May-95 Mac Gillon (mgillon) at NeXT
 *	New version based on 4.4
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <net/netisr.h>

#include <kern/queue.h>

extern kernel_pmap;		/* The kernel's pmap */

decl_simple_lock_data(, mbuf_slock);
struct mbuf	*mfree;		/* mbuf free list */
struct mbuf	*mfreelater;	/* mbuf deallocation list */
extern vm_map_t	mb_map;		/* special map */
int		m_want;		/* sleepers on mbufs */
extern int	nmbclusters;	/* max number of mapped clusters */
short		*mclrefcnt;	/* mapped cluster reference counts */
int		*mcl_paddr;
union mcluster	*mclfree;	/* mapped cluster free list */
int		max_linkhdr;	/* largest link-level header */
int		max_protohdr;	/* largest protocol header */
int		max_hdr;	/* largest link+protocol header */
int		max_datalen;	/* MHLEN - max_hdr */
struct mbstat	mbstat;		/* statistics */
union mcluster	*mbutl;		/* first mapped cluster address */
union mcluster	*embutl;	/* ending virtual address of mclusters */

static int	nclpp;		/* # clusters per physical page */
static char	mbfail[] = "mbuf not mapped";

static int m_howmany();

/* The number of cluster mbufs that are allocated, to start. */
#define MINCL	max(16, 2)

extern int dlil_input_thread_wakeup;
extern int dlil_expand_mcl;
extern int dlil_initialized;


void
mbinit()
{
	int s, m;
	int initmcl = 32;

	if (nclpp)
		return;
	nclpp = round_page(MCLBYTES) / MCLBYTES;	/* see mbufgc() */
	if (nclpp < 1) nclpp = 1;
	MBUF_LOCKINIT();
//	NETISR_LOCKINIT();

	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	if (nmbclusters == 0)
		nmbclusters = NMBCLUSTERS;
	MALLOC(mclrefcnt, short *, nmbclusters * sizeof (short),
	    M_TEMP, M_WAITOK);
	if (mclrefcnt == 0)
		panic("mbinit");
	for (m = 0; m < nmbclusters; m++)
		mclrefcnt[m] = -1;

	MALLOC(mcl_paddr, int *, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int),
	    M_TEMP, M_WAITOK);
	if (mcl_paddr == 0)
		panic("mbinit1");
	bzero((char *)mcl_paddr, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int));

	embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES));

	PE_parse_boot_arg("initmcl", &initmcl);

	if (m_clalloc(max(PAGE_SIZE/CLBYTES, 1) * initmcl, M_WAIT) == 0)
		goto bad;
	MBUF_UNLOCK();
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 */
/* ARGSUSED */
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register union mcluster *mcl;
	register int i;
	vm_size_t size;
	static char doing_alloc;

	/*
	 * Honor the caller's wish to block or not block.
	 * We have a way to grow the pool asynchronously,
	 * by kicking the dlil_input_thread.
	 */
	if ((i = m_howmany()) <= 0)
		goto out;

	if ((nowait == M_DONTWAIT))
		goto out;

	if (ncl < i)
		ncl = i;
	size = round_page(ncl * MCLBYTES);
	mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);

	if (mcl == 0 && ncl > 1) {
		size = round_page(MCLBYTES); /* Try for 1 if failed */
		mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);
	}

	if (mcl) {
		MBUF_LOCK();
		ncl = size / MCLBYTES;
		for (i = 0; i < ncl; i++) {
			if (++mclrefcnt[mtocl(mcl)] != 0)
				panic("m_clalloc already there");
			if (((int)mcl & PAGE_MASK) == 0)
				mcl_paddr[((char *)mcl - (char *)mbutl)/PAGE_SIZE] = pmap_extract(kernel_pmap, (char *)mcl);

			mcl->mcl_next = mclfree;
			mclfree = mcl++;
		}
		mbstat.m_clfree += ncl;
		mbstat.m_clusters += ncl;
		return (ncl);
	} /* else ... */
out:
	MBUF_LOCK();

	/*
	 * When non-blocking, we kick the dlil thread if we have to grow the
	 * pool or if the number of free clusters is less than requested.
	 */
	if ((nowait == M_DONTWAIT) && (i > 0 || ncl >= mbstat.m_clfree)) {
		dlil_expand_mcl = 1;
		if (dlil_initialized)
			wakeup((caddr_t)&dlil_input_thread_wakeup);
	}

	if (mbstat.m_clfree >= ncl)
		return 1;

	mbstat.m_drops++;

	return 0;
}

/*
 * Add more free mbufs by cutting up a cluster.
 */
m_expand(canwait)
	int canwait;
{
	register caddr_t mcl;

	if (mbstat.m_clfree < (mbstat.m_clusters >> 4))
		/* 1/16th of the total number of cluster mbufs allocated is
		   reserved for large packets.  The number reserved must
		   always be < 1/2, or future allocation will be prevented.
		*/
		return 0;

	MCLALLOC(mcl, canwait);
	if (mcl) {
		register struct mbuf *m = (struct mbuf *)mcl;
		register int i = NMBPCL;
		MBUF_LOCK();
		mbstat.m_mtypes[MT_FREE] += i;
		mbstat.m_mbufs += i;
		while (i--) {
			m->m_type = MT_FREE;
			m->m_next = mfree;
			mfree = m++;
		}
		i = m_want;
		m_want = 0;
		MBUF_UNLOCK();
		if (i) wakeup((caddr_t)&mfree);
		return 1;
	}
	return 0;
}
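
/*
 * Worked example of the reservation check above (illustrative, not
 * part of the original source): with mbstat.m_clusters == 512 the
 * threshold is 512 >> 4 == 32, so m_expand() refuses to carve up a
 * cluster once fewer than 32 clusters remain free, keeping a 1/16
 * reserve for large packets.
 */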

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(canwait, type)
	int canwait, type;
{
#define m_retry(h, t)	0
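	/*
	 * NB (editorial comment): the local #define above keeps the MGET()
	 * below from recursing -- inside this function, MGET's fallback
	 * call to m_retry() expands to the constant 0.  It is also why the
	 * "XXX" placeholder passed as MGET's wait argument is harmless:
	 * the macro discards its arguments, so XXX is never evaluated.
	 */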
	register struct mbuf *m;
	int wait, s;
	funnel_t * fnl;
	int fnl_switch = 0;
	boolean_t funnel_state;

	for (;;) {
		(void) m_expand(canwait);
		MGET(m, XXX, type);
		if (m || canwait == M_DONTWAIT)
			break;
		MBUF_LOCK();
		wait = m_want++;

		dlil_expand_mcl = 1;
		MBUF_UNLOCK();

		if (dlil_initialized)
			wakeup((caddr_t)&dlil_input_thread_wakeup);

		if (wait == 0) {
			mbstat.m_drain++;
		}
		else {
			assert_wait((caddr_t)&mfree, THREAD_UNINT);
			mbstat.m_wait++;
		}

		/*
		 * Grab the network funnel because m_reclaim calls into the
		 * socket domains and tsleep ends up calling splhigh.
		 */
		fnl = thread_funnel_get();
		if (fnl && (fnl == kernel_flock)) {
			fnl_switch = 1;
			thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
		} else
			funnel_state = thread_funnel_set(network_flock, TRUE);
		if (wait == 0) {
			m_reclaim();
		} else {
			/* Sleep with a small timeout as insurance */
			(void) tsleep((caddr_t)0, PZERO-1, "m_retry", hz);
		}
		if (fnl_switch)
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		else
			thread_funnel_set(network_flock, funnel_state);
	}
	return (m);
#undef	m_retry
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	if (m = m_retry(canwait, type)) {
		m->m_flags |= M_PKTHDR;
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.aux = (struct mbuf *)NULL;
		m->m_pkthdr.reserved1 = NULL;
		m->m_pkthdr.reserved2 = NULL;
	}
	return (m);
}

m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr; pr = pr->pr_next)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}
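
/*
 * Usage sketch (illustrative, not part of the original source): copy a
 * small buffer into a freshly zeroed mbuf.  Assumes len <= MLEN.
 */
#if 0
static struct mbuf *
example_small_alloc(caddr_t src, int len)
{
	struct mbuf *m;

	m = m_getclr(M_DONTWAIT, MT_DATA);	/* zero-filled, MLEN bytes of room */
	if (m == 0)
		return (0);			/* no mbufs; caller must cope */
	bcopy(src, mtod(m, caddr_t), (unsigned)len);
	m->m_len = len;
	return (m);
}
#endif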

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	struct mbuf *n = m->m_next;
	int i, s;

	if (m->m_type == MT_FREE)
		panic("freeing free mbuf");

	MBUF_LOCK();
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m)) {
			remque((queue_t)&m->m_ext.ext_refs);
		} else if (m->m_ext.ext_free == NULL) {
			union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;
			if (MCLUNREF(mcl)) {
				mcl->mcl_next = mclfree;
				mclfree = mcl;
				++mbstat.m_clfree;
			}
#ifdef COMMENT_OUT
/* *** Since m_split() increments "mclrefcnt[mtocl(m->m_ext.ext_buf)]",
       and AppleTalk ADSP uses m_split(), this incorrect sanity check
       caused a panic.
*** */
			else	/* sanity check - not referenced this way */
				panic("m_free m_ext cluster not free");
#endif
		} else {
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_size, m->m_ext.ext_arg);
		}
	}
	mbstat.m_mtypes[m->m_type]--;
	(void) MCLUNREF(m);
	m->m_type = MT_FREE;
	mbstat.m_mtypes[m->m_type]++;
	m->m_flags = 0;
	m->m_next = mfree;
	m->m_len = 0;
	mfree = m;
	i = m_want;
	m_want = 0;
	MBUF_UNLOCK();
	if (i) wakeup((caddr_t)&mfree);
	return (n);
}

/* Best effort to get an mbuf cluster + pkthdr under one lock.
 * If we don't have them available, just bail out and use the regular
 * path.
 * Used by drivers to allocate packets on a receive ring.
 */
struct mbuf *
m_getpacket(void)
{
	struct mbuf *m;
	m_clalloc(1, M_DONTWAIT);  /* takes the MBUF_LOCK, but doesn't release it... */
	if ((mfree != 0) && (mclfree != 0)) {	/* mbuf + cluster are available */
		m = mfree;
		mfree = m->m_next;
		MCHECK(m);
		++mclrefcnt[mtocl(m)];
		mbstat.m_mtypes[MT_FREE]--;
		mbstat.m_mtypes[MT_DATA]++;
		m->m_ext.ext_buf = (caddr_t)mclfree; /* get the cluster */
		++mclrefcnt[mtocl(m->m_ext.ext_buf)];
		mbstat.m_clfree--;
		mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;

		m->m_next = m->m_nextpkt = 0;
		m->m_type = MT_DATA;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags = M_PKTHDR | M_EXT;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.aux = (struct mbuf *)NULL;
		m->m_pkthdr.reserved1 = 0;
		m->m_pkthdr.reserved2 = 0;
		m->m_ext.ext_free = 0;
		m->m_ext.ext_size = MCLBYTES;
		m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
		    &m->m_ext.ext_refs;
		MBUF_UNLOCK();
	}
	else {	/* slow path: either mbuf or cluster need to be allocated anyway */
		MBUF_UNLOCK();

		MGETHDR(m, M_WAITOK, MT_DATA);

		if (m == 0)
			return (NULL);

		MCLGET(m, M_WAITOK);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			m = 0;
		}
	}
	return (m);
}
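
/*
 * Usage sketch (illustrative, not part of the original source): a
 * driver filling its receive ring with cluster-backed packets.  The
 * "ring" array and RXRING_SIZE are hypothetical.
 */
#if 0
	for (i = 0; i < RXRING_SIZE; i++) {
		struct mbuf *m = m_getpacket();

		if (m == 0)
			break;		/* pool exhausted; retry later */
		m->m_len = m->m_pkthdr.len = MCLBYTES;
		ring[i] = m;		/* DMA target is mtod(m, caddr_t) */
	}
#endif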


struct mbuf *
m_getpackets(int num_needed, int num_with_pkthdrs, int how)
{
	struct mbuf *m;
	struct mbuf **np, *top;

	top = NULL;
	np = &top;

	m_clalloc(num_needed, how);  /* takes the MBUF_LOCK, but doesn't release it... */

	while (num_needed--) {
		if (mfree && mclfree) {	/* mbuf + cluster are available */
			m = mfree;
			MCHECK(m);
			mfree = m->m_next;
			++mclrefcnt[mtocl(m)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[MT_DATA]++;
			m->m_ext.ext_buf = (caddr_t)mclfree; /* get the cluster */
			++mclrefcnt[mtocl(m->m_ext.ext_buf)];
			mbstat.m_clfree--;
			mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;

			m->m_next = m->m_nextpkt = 0;
			m->m_type = MT_DATA;
			m->m_data = m->m_ext.ext_buf;
			m->m_ext.ext_free = 0;
			m->m_ext.ext_size = MCLBYTES;
			m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;

			if (num_with_pkthdrs == 0)
				m->m_flags = M_EXT;
			else {
				m->m_flags = M_PKTHDR | M_EXT;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
				m->m_pkthdr.header = NULL;
				m->m_pkthdr.csum_flags = 0;
				m->m_pkthdr.csum_data = 0;
				m->m_pkthdr.aux = (struct mbuf *)NULL;
				m->m_pkthdr.reserved1 = NULL;
				m->m_pkthdr.reserved2 = NULL;

				num_with_pkthdrs--;
			}

		} else {

			MBUF_UNLOCK();

			if (num_with_pkthdrs == 0) {
				MGET(m, how, MT_DATA);
			} else {
				MGETHDR(m, how, MT_DATA);

				if (m)
					m->m_pkthdr.len = 0;
				num_with_pkthdrs--;
			}
			if (m == 0)
				return (top);

			MCLGET(m, how);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				return (top);
			}
			MBUF_LOCK();
		}
		*np = m;

		if (num_with_pkthdrs)
			np = &m->m_nextpkt;
		else
			np = &m->m_next;
	}
	MBUF_UNLOCK();

	return (top);
}


struct mbuf *
m_getpackethdrs(int num_needed, int how)
{
	struct mbuf *m;
	struct mbuf **np, *top;

	top = NULL;
	np = &top;

	MBUF_LOCK();

	while (num_needed--) {
		if (m = mfree) {	/* mbufs are available */
			MCHECK(m);
			mfree = m->m_next;
			++mclrefcnt[mtocl(m)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[MT_DATA]++;

			m->m_next = m->m_nextpkt = 0;
			m->m_type = MT_DATA;
			m->m_flags = M_PKTHDR;
			m->m_data = m->m_pktdat;
			m->m_pkthdr.len = 0;
			m->m_pkthdr.rcvif = NULL;
			m->m_pkthdr.header = NULL;
			m->m_pkthdr.csum_flags = 0;
			m->m_pkthdr.csum_data = 0;
			m->m_pkthdr.aux = (struct mbuf *)NULL;
			m->m_pkthdr.reserved1 = NULL;
			m->m_pkthdr.reserved2 = NULL;

		} else {

			MBUF_UNLOCK();

			m = m_retryhdr(how, MT_DATA);

			if (m == 0)
				return (top);

			MBUF_LOCK();
		}
		*np = m;
		np = &m->m_nextpkt;
	}
	MBUF_UNLOCK();

	return (top);
}


/* Free an mbuf list (linked through m_nextpkt) while following m_next
 * under one lock.  Returns the count of packets freed.  Used by drivers.
 */
int
m_freem_list(m)
	struct mbuf *m;
{
	struct mbuf *nextpkt;
	int i, count = 0;

	MBUF_LOCK();

	while (m) {
		if (m)
			nextpkt = m->m_nextpkt; /* chain of linked mbufs from driver */
		else
			nextpkt = 0;
		count++;

		while (m) {	/* free the mbuf chain (like mfreem) */
			struct mbuf *n = m->m_next;

			if (n && n->m_nextpkt)
				panic("m_freem_list: m_nextpkt of m_next != NULL");
			if (m->m_type == MT_FREE)
				panic("freeing free mbuf");

			if (m->m_flags & M_EXT) {
				if (MCLHASREFERENCE(m)) {
					remque((queue_t)&m->m_ext.ext_refs);
				} else if (m->m_ext.ext_free == NULL) {
					union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;
					if (MCLUNREF(mcl)) {
						mcl->mcl_next = mclfree;
						mclfree = mcl;
						++mbstat.m_clfree;
					}
				} else {
					(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
					    m->m_ext.ext_size, m->m_ext.ext_arg);
				}
			}
			mbstat.m_mtypes[m->m_type]--;
			(void) MCLUNREF(m);
			mbstat.m_mtypes[MT_FREE]++;
			m->m_type = MT_FREE;
			m->m_flags = 0;
			m->m_len = 0;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
		m = nextpkt; /* bump m with saved nextpkt if any */
	}
	if (i = m_want)
		m_want = 0;

	MBUF_UNLOCK();

	if (i)
		wakeup((caddr_t)&mfree);

	return (count);
}
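
/*
 * Usage sketch (illustrative, not part of the original source): a
 * driver retiring a batch of transmitted packets links them through
 * m_nextpkt and frees them with a single lock round trip.  "txq_head"
 * is hypothetical.
 */
#if 0
	struct mbuf *done = txq_head;	/* packets linked via m_nextpkt */
	int freed;

	txq_head = 0;
	freed = m_freem_list(done);	/* one MBUF_LOCK for the whole batch */
#endif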

void
m_freem(m)
	register struct mbuf *m;
{
	while (m)
		m = m_free(m);
}

/*
 * Mbuffer utility routines.
 */
/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 */
m_leadingspace(m)
	register struct mbuf *m;
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return (0);
		return (m->m_data - m->m_ext.ext_buf);
	}
	if (m->m_flags & M_PKTHDR)
		return (m->m_data - m->m_pktdat);
	return (m->m_data - m->m_dat);
}

/*
 * Compute the amount of space available
 * after the end of data in an mbuf.
 */
m_trailingspace(m)
	register struct mbuf *m;
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return (0);
		return (m->m_ext.ext_buf + m->m_ext.ext_size -
		    (m->m_data + m->m_len));
	}
	return (&m->m_dat[MLEN] - (m->m_data + m->m_len));
}
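
/*
 * Usage sketch (illustrative, not part of the original source): append
 * "len" bytes in place when the last mbuf has room, instead of
 * allocating another mbuf.  A real caller with a packet header would
 * also bump m_pkthdr.len.
 */
#if 0
static int
example_append(struct mbuf *m, caddr_t src, int len)
{
	while (m->m_next)
		m = m->m_next;
	if (m_trailingspace(m) < len)
		return (0);		/* caller must grow the chain */
	bcopy(src, mtod(m, caddr_t) + m->m_len, (unsigned)len);
	m->m_len += len;
	return (1);
}
#endif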

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
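
/*
 * Usage sketch (illustrative, not part of the original source):
 * prepending a link-level header.  Callers normally go through the
 * M_PREPEND() macro, which uses leading space in the first mbuf when
 * possible and falls back to m_prepend().  "struct hdr" is
 * hypothetical.
 */
#if 0
	m = m_prepend(m, sizeof (struct hdr), M_DONTWAIT);
	if (m == 0)
		return;			/* chain was freed on failure */
	/* fill in mtod(m, struct hdr *) */
#endif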

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;

	while (off >= m->m_len) {
		if (m == 0)
			panic("m_copym");
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;

	MBUF_LOCK();

	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		if (n = mfree) {
			MCHECK(n);
			++mclrefcnt[mtocl(n)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[m->m_type]++;
			mfree = n->m_next;
			n->m_next = n->m_nextpkt = 0;
			n->m_type = m->m_type;
			n->m_data = n->m_dat;
			n->m_flags = 0;
		} else {
			MBUF_UNLOCK();
			n = m_retry(wait, m->m_type);
			MBUF_LOCK();
		}
		*np = n;

		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		if (len == M_COPYALL) {
			if (min(len, (m->m_len - off)) == len) {
				printf("m->m_len %d - off %d = %d, %d\n",
				    m->m_len, off, m->m_len - off,
				    min(len, (m->m_len - off)));
			}
		}
		n->m_len = min(len, (m->m_len - off));
		if (n->m_len == M_COPYALL) {
			printf("n->m_len == M_COPYALL, fixing\n");
			n->m_len = MHLEN;
		}
		if (m->m_flags & M_EXT) {
			n->m_ext = m->m_ext;
			insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
			n->m_data = m->m_data + off;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	MBUF_UNLOCK();

	if (top == 0)
		MCFail++;

	return (top);
nospace:
	MBUF_UNLOCK();

	m_freem(top);
	MCFail++;
	return (0);
}
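
/*
 * Usage sketch (illustrative, not part of the original source): split
 * a read-only view of a packet into header and body copies.  "hdrlen"
 * is hypothetical; cluster-backed data is shared by reference in the
 * M_EXT branch above rather than copied.
 */
#if 0
	struct mbuf *hdr  = m_copym(m, 0, hdrlen, M_DONTWAIT);
	struct mbuf *body = m_copym(m, hdrlen, M_COPYALL, M_DONTWAIT);

	if (hdr == 0 || body == 0) {
		if (hdr)
			m_freem(hdr);
		if (body)
			m_freem(body);
		/* handle the allocation failure */
	}
#endif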



struct mbuf *
m_copym_with_hdrs(m, off0, len, wait, m_last, m_off)
	register struct mbuf *m;
	int off0, wait;
	register int len;
	struct mbuf **m_last;
	int *m_off;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top = 0;
	int copyhdr = 0;
	int type;

	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;

	if (*m_last) {
		m = *m_last;
		off = *m_off;
	} else {
		while (off >= m->m_len) {
			off -= m->m_len;
			m = m->m_next;
		}
	}
	MBUF_LOCK();

	while (len > 0) {
		if (top == 0)
			type = MT_HEADER;
		else {
			if (m == 0)
				panic("m_gethdr_and_copym");
			type = m->m_type;
		}
		if (n = mfree) {
			MCHECK(n);
			++mclrefcnt[mtocl(n)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[type]++;
			mfree = n->m_next;
			n->m_next = n->m_nextpkt = 0;
			n->m_type = type;

			if (top) {
				n->m_data = n->m_dat;
				n->m_flags = 0;
			} else {
				n->m_data = n->m_pktdat;
				n->m_flags = M_PKTHDR;
				n->m_pkthdr.len = 0;
				n->m_pkthdr.rcvif = NULL;
				n->m_pkthdr.header = NULL;
				n->m_pkthdr.csum_flags = 0;
				n->m_pkthdr.csum_data = 0;
				n->m_pkthdr.aux = (struct mbuf *)NULL;
				n->m_pkthdr.reserved1 = NULL;
				n->m_pkthdr.reserved2 = NULL;
			}
		} else {
			MBUF_UNLOCK();
			if (top)
				n = m_retry(wait, type);
			else
				n = m_retryhdr(wait, type);
			MBUF_LOCK();
		}
		if (n == 0)
			goto nospace;
		if (top == 0) {
			top = n;
			np = &top->m_next;
			continue;
		} else
			*np = n;

		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, (m->m_len - off));

		if (m->m_flags & M_EXT) {
			n->m_ext = m->m_ext;
			insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
			n->m_data = m->m_data + off;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;

		if (len == 0) {
			if ((off + n->m_len) == m->m_len) {
				*m_last = m->m_next;
				*m_off = 0;
			} else {
				*m_last = m;
				*m_off = off + n->m_len;
			}
			break;
		}
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	MBUF_UNLOCK();

	return (top);
nospace:
	MBUF_UNLOCK();

	if (top)
		m_freem(top);
	MCFail++;
	return (0);
}


/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
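
/*
 * Usage sketch (illustrative, not part of the original source): pull a
 * fixed-size header out of a possibly fragmented chain into a stack
 * buffer, without modifying the chain the way m_pullup() would.
 */
#if 0
	char hdrbuf[16];

	if (m->m_pkthdr.len < sizeof (hdrbuf))
		return;			/* runt packet */
	m_copydata(m, 0, sizeof (hdrbuf), (caddr_t)hdrbuf);
#endif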

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			m = mp;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}
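
/*
 * Usage sketch (illustrative, not part of the original source): strip
 * a 14-byte link-level header from the front and a 4-byte trailer from
 * the back of a received packet; the sizes are hypothetical.
 */
#if 0
	m_adj(m, 14);			/* positive len: trim from head */
	m_adj(m, -4);			/* negative len: trim from tail */
#endif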

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
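
/*
 * Usage sketch (illustrative, not part of the original source): the
 * classic m_pullup() idiom, run before casting m_data to a header
 * structure so that mtod() is safe.  "struct hdr" is hypothetical.
 */
#if 0
	if (m->m_len < sizeof (struct hdr) &&
	    (m = m_pullup(m, sizeof (struct hdr))) == 0)
		return;			/* chain already freed by m_pullup */
	hp = mtod(m, struct hdr *);
#endif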

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		MBUF_LOCK();
		n->m_ext = m->m_ext;
		insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
		MBUF_UNLOCK();
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
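
/*
 * Usage sketch (illustrative, not part of the original source): chop
 * an oversized record into a leading piece of "mss" bytes plus a
 * remainder, e.g. for segmentation; "mss" is hypothetical.
 */
#if 0
	struct mbuf *rest = m_split(m, mss, M_DONTWAIT);

	if (rest == 0)
		return;			/* m left intact; retry later */
	/* m now holds bytes [0, mss); rest holds the remainder */
#endif
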
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy)();
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else {
				/* give up when it's out of cluster mbufs */
				if (top)
					m_freem(top);
				m_freem(m);
				return (0);
			}
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
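
/*
 * Usage sketch (illustrative, not part of the original source): a
 * driver copying a received frame out of board-local memory.  "sc" and
 * "pktlen" are hypothetical; a null copy routine makes m_devget() use
 * bcopy().
 */
#if 0
	m = m_devget(sc->rx_buf, pktlen, 0, &sc->sc_if, 0);
	if (m == 0)
		return;			/* dropped: out of mbufs */
#endif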

/*
 * Cluster freelist allocation check.  The mbuf lock must be held.
 * Ensure hysteresis between hi/lo.
 */
static int
m_howmany()
{
	register int i;

	/* Under minimum */
	if (mbstat.m_clusters < MINCL)
		return (MINCL - mbstat.m_clusters);
	/* Too few (free < 1/2 total) and not over maximum */
	if (mbstat.m_clusters < nmbclusters &&
	    (i = ((mbstat.m_clusters >> 1) - mbstat.m_clfree)) > 0)
		return i;
	return 0;
}
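
/*
 * Worked example of the hysteresis above (illustrative, not part of
 * the original source): with nmbclusters == 1024, m_clusters == 512
 * and m_clfree == 100, half the pool is 256, so m_howmany() returns
 * 256 - 100 == 156 and the pool grows until free clusters again reach
 * half of the total.
 */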


/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
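
/*
 * Usage sketch (illustrative, not part of the original source):
 * overwrite four bytes at "offset" in a chain; m_copyback() extends
 * the chain if the offset lies past the current end.  "offset" is
 * hypothetical.
 */
#if 0
	u_int32_t val = 0;

	m_copyback(m, offset, sizeof (val), (caddr_t)&val);
#endif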


char *mcl_to_paddr(register char *addr) {
	register int base_phys;

	if (addr < (char *)mbutl || addr >= (char *)embutl)
		return (0);
	base_phys = mcl_paddr[(addr - (char *)mbutl) >> PAGE_SHIFT];

	if (base_phys == 0)
		return (0);
	return ((char *)((int)base_phys | ((int)addr & PAGE_MASK)));
}

/*
 * Dup the mbuf chain passed in.  The whole thing.  No cute additional cruft.
 * And really copy the thing.  That way, we don't "precompute" checksums
 * for unsuspecting consumers.
 * Assumption: m->m_nextpkt == 0.
 * Trick: for small packets, don't dup into a cluster.  That way received
 * packets don't take up too much room in the sockbuf (cf. sbspace()).
 */
int MDFail;

struct mbuf *
m_dup(register struct mbuf *m, int how)
{
	register struct mbuf *n, **np;
	struct mbuf *top;
	int copyhdr = 0;

	np = &top;
	top = 0;
	if (m->m_flags & M_PKTHDR)
		copyhdr = 1;

	/*
	 * Quick check: if we have one mbuf and its data fits in an
	 * mbuf with packet header, just copy and go.
	 */
	if (m->m_next == NULL) {
		/* Then just move the data into an mbuf and be done... */
		if (copyhdr) {
			if (m->m_pkthdr.len <= MHLEN) {
				if ((n = m_gethdr(how, m->m_type)) == NULL)
					return (NULL);
				n->m_len = m->m_len;
				n->m_flags |= (m->m_flags & M_COPYFLAGS);
				n->m_pkthdr.len = m->m_pkthdr.len;
				n->m_pkthdr.rcvif = m->m_pkthdr.rcvif;
				n->m_pkthdr.header = NULL;
				n->m_pkthdr.csum_flags = 0;
				n->m_pkthdr.csum_data = 0;
				n->m_pkthdr.aux = NULL;
				n->m_pkthdr.reserved1 = 0;
				n->m_pkthdr.reserved2 = 0;
				bcopy(m->m_data, n->m_data, m->m_pkthdr.len);
				return (n);
			}
		} else if (m->m_len <= MLEN) {
			if ((n = m_get(how, m->m_type)) == NULL)
				return (NULL);
			bcopy(m->m_data, n->m_data, m->m_len);
			n->m_len = m->m_len;
			return (n);
		}
	}
	while (m) {
#if BLUE_DEBUG
		kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
		    m->m_data);
#endif
		if (copyhdr)
			n = m_gethdr(how, m->m_type);
		else
			n = m_get(how, m->m_type);
		if (n == 0)
			goto nospace;
		if (m->m_flags & M_EXT) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0)
				goto nospace;
		}
		*np = n;
		if (copyhdr) {
			/* Don't use M_COPY_PKTHDR: preserve m_data */
			n->m_pkthdr = m->m_pkthdr;
			n->m_flags |= (m->m_flags & M_COPYFLAGS);
			copyhdr = 0;
			if ((n->m_flags & M_EXT) == 0)
				n->m_data = n->m_pktdat;
		}
		n->m_len = m->m_len;
		/*
		 * Get the dup on the same bdry as the original.
		 * Assume that the two mbufs have the same offset to data area
		 * (up to word bdries).
		 */
		bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len);
		m = m->m_next;
		np = &n->m_next;
#if BLUE_DEBUG
		kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
		    n->m_data);
#endif
	}

	if (top == 0)
		MDFail++;
	return (top);
nospace:
	m_freem(top);
	MDFail++;
	return (0);
}

#if 0
#include <sys/sysctl.h>

static int mhog_num = 0;
static struct mbuf *mhog_chain = 0;
static int mhog_wait = 1;

static int
sysctl_mhog_num SYSCTL_HANDLER_ARGS
{
	int old = mhog_num;
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error && req->newptr) {
		int i;
		struct mbuf *m;

		if (mhog_chain) {
			m_freem(mhog_chain);
			mhog_chain = 0;
		}

		for (i = 0; i < mhog_num; i++) {
			MGETHDR(m, mhog_wait ? M_WAIT : M_DONTWAIT, MT_DATA);
			if (m == 0)
				break;

			MCLGET(m, mhog_wait ? M_WAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m = 0;
				break;
			}
			m->m_next = mhog_chain;
			mhog_chain = m;
		}
		mhog_num = i;
	}

	return error;
}

SYSCTL_NODE(_kern_ipc, OID_AUTO, mhog, CTLFLAG_RW, 0, "mbuf hog");

SYSCTL_PROC(_kern_ipc_mhog, OID_AUTO, cluster, CTLTYPE_INT|CTLFLAG_RW,
    &mhog_num, 0, &sysctl_mhog_num, "I", "");
SYSCTL_INT(_kern_ipc_mhog, OID_AUTO, wait, CTLFLAG_RW, &mhog_wait,
    0, "");
#endif
