/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
/* HISTORY
 *
 *  10/15/97 Annette DeSchon (deschon@apple.com)
 *	Fixed bug in which all cluster mbufs were broken up
 *	into regular mbufs: Some clusters are now reserved.
 *	When a cluster is needed, regular mbufs are no longer
 *	used.  (Radar 1683621)
 *  20-May-95 Mac Gillon (mgillon) at NeXT
 *	New version based on 4.4
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <net/netisr.h>

#include <kern/queue.h>
#include <kern/kern_types.h>
#include <kern/sched_prim.h>

#define _MCLREF(p)	(++mclrefcnt[mtocl(p)])
#define _MCLUNREF(p)	(--mclrefcnt[mtocl(p)] == 0)

extern pmap_t kernel_pmap;	/* The kernel's pmap */

decl_simple_lock_data(, mbuf_slock);
struct mbuf	*mfree;		/* mbuf free list */
struct mbuf	*mfreelater;	/* mbuf deallocation list */
extern vm_map_t	mb_map;		/* special map */
int		m_want;		/* sleepers on mbufs */
extern int	nmbclusters;	/* max number of mapped clusters */
short		*mclrefcnt;	/* mapped cluster reference counts */
int		*mcl_paddr;
union mcluster	*mclfree;	/* mapped cluster free list */
int		max_linkhdr;	/* largest link-level header */
int		max_protohdr;	/* largest protocol header */
int		max_hdr;	/* largest link+protocol header */
int		max_datalen;	/* MHLEN - max_hdr */
struct mbstat	mbstat;		/* statistics */
union mcluster	*mbutl;		/* first mapped cluster address */
union mcluster	*embutl;	/* ending virtual address of mclusters */

static int	nclpp;		/* # clusters per physical page */
static char	mbfail[] = "mbuf not mapped";

static int m_howmany();

/* The number of cluster mbufs that are allocated, to start. */
#define MINCL	max(16, 2)

extern int dlil_input_thread_wakeup;
extern int dlil_expand_mcl;
extern int dlil_initialized;
#if 0
static int mfree_munge = 0;
#if 0
#define _MFREE_MUNGE(m) {						\
	if (mfree_munge)						\
	{	int i;							\
		vm_offset_t *element = (vm_offset_t *)(m);		\
		for (i = 0;						\
		     i < sizeof(struct mbuf)/sizeof(vm_offset_t);	\
		     i++)						\
			(element)[i] = 0xdeadbeef;			\
	}								\
}
#else
void
munge_mbuf(struct mbuf *m)
{
	int	i;
	vm_offset_t *element = (vm_offset_t *)(m);
	for (i = 0;
	     i < sizeof(struct mbuf)/sizeof(vm_offset_t);
	     i++)
		(element)[i] = 0xdeadbeef;
}
#define _MFREE_MUNGE(m) {		\
	if (mfree_munge)		\
		munge_mbuf(m);		\
}
#endif
#else
#define _MFREE_MUNGE(m)
#endif


#define _MINTGET(m, type) {						\
	MBUF_LOCK();							\
	if (((m) = mfree) != 0) {					\
		MCHECK(m);						\
		++mclrefcnt[mtocl(m)];					\
		mbstat.m_mtypes[MT_FREE]--;				\
		mbstat.m_mtypes[(type)]++;				\
		mfree = (m)->m_next;					\
	}								\
	MBUF_UNLOCK();							\
}


void
mbinit()
{
	int s, m;
	int initmcl = 32;

	if (nclpp)
		return;
	nclpp = round_page(MCLBYTES) / MCLBYTES;	/* see mbufgc() */
	if (nclpp < 1) nclpp = 1;
	MBUF_LOCKINIT();
//	NETISR_LOCKINIT();

	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	if (nmbclusters == 0)
		nmbclusters = NMBCLUSTERS;
	MALLOC(mclrefcnt, short *, nmbclusters * sizeof (short),
	       M_TEMP, M_WAITOK);
	if (mclrefcnt == 0)
		panic("mbinit");
	for (m = 0; m < nmbclusters; m++)
		mclrefcnt[m] = -1;

	MALLOC(mcl_paddr, int *, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int),
	       M_TEMP, M_WAITOK);
	if (mcl_paddr == 0)
		panic("mbinit1");
	bzero((char *)mcl_paddr, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int));

	embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES));

	PE_parse_boot_arg("initmcl", &initmcl);

	if (m_clalloc(max(PAGE_SIZE/CLBYTES, 1) * initmcl, M_WAIT) == 0)
		goto bad;
	MBUF_UNLOCK();
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 */
/* ARGSUSED */
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register union mcluster *mcl;
	register int i;
	vm_size_t size;
	static char doing_alloc;

	/*
	 * Honor the caller's wish to block or not block.
	 * We have a way to grow the pool asynchronously,
	 * by kicking the dlil_input_thread.
	 */
	if ((i = m_howmany()) <= 0)
		goto out;

	if (nowait == M_DONTWAIT)
		goto out;

	if (ncl < i)
		ncl = i;
	size = round_page(ncl * MCLBYTES);
	mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);

	if (mcl == 0 && ncl > 1) {
		size = round_page(MCLBYTES);	/* Try for 1 if failed */
		mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);
	}

	if (mcl) {
		MBUF_LOCK();
		ncl = size / MCLBYTES;
		for (i = 0; i < ncl; i++) {
			if (++mclrefcnt[mtocl(mcl)] != 0)
				panic("m_clalloc already there");
			if (((int)mcl & PAGE_MASK) == 0)
				mcl_paddr[((char *)mcl - (char *)mbutl)/PAGE_SIZE] = pmap_extract(kernel_pmap, (char *)mcl);

			mcl->mcl_next = mclfree;
			mclfree = mcl++;
		}
		mbstat.m_clfree += ncl;
		mbstat.m_clusters += ncl;
		return (ncl);
	} /* else ... */
out:
	MBUF_LOCK();

	/*
	 * When non-blocking, we kick the dlil thread if we have to grow the
	 * pool or if the number of free clusters is less than requested.
	 */
	if ((nowait == M_DONTWAIT) && (i > 0 || ncl >= mbstat.m_clfree)) {
		dlil_expand_mcl = 1;
		if (dlil_initialized)
			wakeup((caddr_t)&dlil_input_thread_wakeup);
	}

	if (mbstat.m_clfree >= ncl)
		return 1;

	mbstat.m_drops++;

	return 0;
}

/*
 * Add more free mbufs by cutting up a cluster.
 */
m_expand(canwait)
	int canwait;
{
	register caddr_t mcl;

	if (mbstat.m_clfree < (mbstat.m_clusters >> 4))
		/* 1/16th of the total number of cluster mbufs allocated is
		   reserved for large packets.  The number reserved must
		   always be < 1/2, or future allocation will be prevented.
		*/
		return 0;

	MCLALLOC(mcl, canwait);
	if (mcl) {
		register struct mbuf *m = (struct mbuf *)mcl;
		register int i = NMBPCL;
		MBUF_LOCK();
		mbstat.m_mtypes[MT_FREE] += i;
		mbstat.m_mbufs += i;
		while (i--) {
			_MFREE_MUNGE(m);
			m->m_type = MT_FREE;
			m->m_next = mfree;
			mfree = m++;
		}
		i = m_want;
		m_want = 0;
		MBUF_UNLOCK();
		if (i) wakeup((caddr_t)&mfree);
		return 1;
	}
	return 0;
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;
	int wait, s;
	funnel_t * fnl;
	int fnl_switch = 0;
	boolean_t funnel_state;

	for (;;) {
		(void) m_expand(canwait);
		_MINTGET(m, type);
		if (m) {
			(m)->m_next = (m)->m_nextpkt = 0;
			(m)->m_type = (type);
			(m)->m_data = (m)->m_dat;
			(m)->m_flags = 0;
		}
		if (m || canwait == M_DONTWAIT)
			break;
		MBUF_LOCK();
		wait = m_want++;
		dlil_expand_mcl = 1;
		if (wait == 0)
			mbstat.m_drain++;
		else
			mbstat.m_wait++;
		MBUF_UNLOCK();

		if (dlil_initialized)
			wakeup((caddr_t)&dlil_input_thread_wakeup);

		/*
		 * Grab the network funnel because m_reclaim calls into the
		 * socket domains and tsleep ends up calling splhigh.
		 */
		fnl = thread_funnel_get();
		if (fnl && (fnl == kernel_flock)) {
			fnl_switch = 1;
			thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
		} else
			funnel_state = thread_funnel_set(network_flock, TRUE);
		if (wait == 0) {
			m_reclaim();
		} else {
			/* Sleep with a small timeout as insurance */
			(void) tsleep((caddr_t)&mfree, PZERO-1, "m_retry", hz);
		}
		if (fnl_switch)
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		else
			thread_funnel_set(network_flock, funnel_state);
	}
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	if (m = m_retry(canwait, type)) {
		m->m_flags |= M_PKTHDR;
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.aux = (struct mbuf *)NULL;
		m->m_pkthdr.reserved1 = NULL;
		m->m_pkthdr.reserved2 = NULL;
	}
	return (m);
}

m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr; pr = pr->pr_next)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	_MINTGET(m, type);
	if (m) {
		m->m_next = m->m_nextpkt = 0;
		m->m_type = type;
		m->m_data = m->m_dat;
		m->m_flags = 0;
	} else
		(m) = m_retry(nowait, type);

	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	_MINTGET(m, type);
	if (m) {
		m->m_next = m->m_nextpkt = 0;
		m->m_type = type;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.aux = (struct mbuf *)NULL;
		m->m_pkthdr.reserved1 = NULL;
		m->m_pkthdr.reserved2 = NULL;
	} else
		m = m_retryhdr(nowait, type);

	return m;
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}
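
/*
 * Illustrative sketch (not compiled): typical use of the allocators
 * above.  The function name "ex_alloc_demo" is hypothetical and not
 * part of this file.
 */
#if 0
static void
ex_alloc_demo(void)
{
	struct mbuf *m;

	/* Plain data mbuf; may be NULL under M_DONTWAIT. */
	m = m_get(M_DONTWAIT, MT_DATA);
	if (m == 0)
		return;
	m->m_len = 0;
	(void) m_free(m);	/* m_free() returns m->m_next (NULL here) */

	/* Plain mbuf whose MLEN-byte data area has been zeroed. */
	m = m_getclr(M_WAIT, MT_DATA);
	if (m)
		m_freem(m);	/* frees the whole m_next chain */
}
#endif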

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	struct mbuf *n = m->m_next;
	int i, s;

	if (m->m_type == MT_FREE)
		panic("freeing free mbuf");

	/* Free the aux data if there is any */
	if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux)
	{
		m_freem(m->m_pkthdr.aux);
	}

	MBUF_LOCK();
	if ((m->m_flags & M_EXT))
	{
		if (MCLHASREFERENCE(m)) {
			remque((queue_t)&m->m_ext.ext_refs);
		} else if (m->m_ext.ext_free == NULL) {
			union mcluster *mcl= (union mcluster *)m->m_ext.ext_buf;
			if (_MCLUNREF(mcl)) {
				mcl->mcl_next = mclfree;
				mclfree = mcl;
				++mbstat.m_clfree;
			}
#ifdef COMMENT_OUT
/* *** Since m_split() increments "mclrefcnt[mtocl(m->m_ext.ext_buf)]",
       and AppleTalk ADSP uses m_split(), this incorrect sanity check
       caused a panic.
*** */
			else	/* sanity check - not referenced this way */
				panic("m_free m_ext cluster not free");
#endif
		} else {
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_size, m->m_ext.ext_arg);
		}
	}
	mbstat.m_mtypes[m->m_type]--;
	(void) _MCLUNREF(m);
	_MFREE_MUNGE(m);
	m->m_type = MT_FREE;
	mbstat.m_mtypes[m->m_type]++;
	m->m_flags = 0;
	m->m_next = mfree;
	m->m_len = 0;
	mfree = m;
	i = m_want;
	m_want = 0;
	MBUF_UNLOCK();
	if (i) wakeup((caddr_t)&mfree);
	return (n);
}

/* m_mclget() adds an mbuf cluster to a normal mbuf */
struct mbuf *
m_mclget(m, nowait)
	struct mbuf *m;
	int nowait;
{
	MCLALLOC(m->m_ext.ext_buf, nowait);
	if (m->m_ext.ext_buf) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_size = MCLBYTES;
		m->m_ext.ext_free = 0;
		m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
			&m->m_ext.ext_refs;
	}

	return m;
}
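
/*
 * Illustrative sketch (not compiled): allocating a cluster-backed
 * packet header mbuf with m_mclget().  "ex_big_buffer" is a
 * hypothetical name.
 */
#if 0
static struct mbuf *
ex_big_buffer(void)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m = m_mclget(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		/* no cluster was attached; fall back or bail */
		(void) m_free(m);
		return (0);
	}
	m->m_len = 0;	/* MCLBYTES of space now at m->m_data */
	return (m);
}
#endif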

/* m_mclalloc() allocates an mbuf cluster */
caddr_t
m_mclalloc(nowait)
	int nowait;
{
	caddr_t p;

	(void)m_clalloc(1, nowait);
	if ((p = (caddr_t)mclfree)) {
		++mclrefcnt[mtocl(p)];
		mbstat.m_clfree--;
		mclfree = ((union mcluster *)p)->mcl_next;
	}
	MBUF_UNLOCK();

	return p;
}

/* m_mclfree() releases a reference to a cluster allocated by MCLALLOC,
 * freeing the cluster if the reference count has reached 0. */
void
m_mclfree(p)
	caddr_t p;
{
	MBUF_LOCK();
	if (--mclrefcnt[mtocl(p)] == 0) {
		((union mcluster *)(p))->mcl_next = mclfree;
		mclfree = (union mcluster *)(p);
		mbstat.m_clfree++;
	}
	MBUF_UNLOCK();
}

/* m_mclhasreference() checks if a cluster of an mbuf is referenced by another mbuf */
int
m_mclhasreference(m)
	struct mbuf *m;
{
	return (m->m_ext.ext_refs.forward != &(m->m_ext.ext_refs));
}

/* m_copy_pkthdr() copies the packet header, taking over any aux chain */
void
m_copy_pkthdr(to, from)
	struct mbuf *to, *from;
{
	to->m_pkthdr = from->m_pkthdr;
	from->m_pkthdr.aux = (struct mbuf *)NULL;
	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = (to)->m_pktdat;
}

/* Best effort to get an mbuf cluster + pkthdr under one lock.
 * If we don't have them available, just bail out and use the regular
 * path.
 * Used by drivers to allocate packets on the receive ring.
 */
struct mbuf *
m_getpacket(void)
{
	struct mbuf *m;
	m_clalloc(1, M_DONTWAIT);	/* takes the MBUF_LOCK, but doesn't release it... */
	if ((mfree != 0) && (mclfree != 0)) {	/* mbuf + cluster are available */
		m = mfree;
		mfree = m->m_next;
		MCHECK(m);
		++mclrefcnt[mtocl(m)];
		mbstat.m_mtypes[MT_FREE]--;
		mbstat.m_mtypes[MT_DATA]++;
		m->m_ext.ext_buf = (caddr_t)mclfree;	/* get the cluster */
		++mclrefcnt[mtocl(m->m_ext.ext_buf)];
		mbstat.m_clfree--;
		mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;

		m->m_next = m->m_nextpkt = 0;
		m->m_type = MT_DATA;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags = M_PKTHDR | M_EXT;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.aux = (struct mbuf *)NULL;
		m->m_pkthdr.reserved1 = 0;
		m->m_pkthdr.reserved2 = 0;
		m->m_ext.ext_free = 0;
		m->m_ext.ext_size = MCLBYTES;
		m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
				&m->m_ext.ext_refs;
		MBUF_UNLOCK();
	}
	else {	/* slow path: either mbuf or cluster need to be allocated anyway */
		MBUF_UNLOCK();

		MGETHDR(m, M_WAITOK, MT_DATA);

		if (m == 0)
			return (NULL);

		MCLGET(m, M_WAITOK);
		if ((m->m_flags & M_EXT) == 0)
		{
			m_free(m); m = 0;
		}
	}
	return (m);
}
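
/*
 * Illustrative sketch (not compiled): refilling a driver receive ring
 * with m_getpacket().  "ex_rx_refill", "ring" and "nslots" are
 * hypothetical names.
 */
#if 0
static void
ex_rx_refill(struct mbuf **ring, int nslots)
{
	int i;

	for (i = 0; i < nslots; i++) {
		if (ring[i])
			continue;
		/* pkthdr mbuf + cluster, or NULL if no cluster could be
		   had (note the slow path above may block) */
		ring[i] = m_getpacket();
		if (ring[i] == 0)
			break;		/* retry on a later pass */
	}
}
#endif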


/*
 * return a list of mbuf hdrs that point to clusters...
 * try for num_needed, if this can't be met, return whatever
 * number were available... set up the first num_with_pkthdrs
 * with mbuf hdrs configured as packet headers... these are
 * chained on the m_nextpkt field... any packets requested beyond
 * this are chained onto the last packet header's m_next field.
 */
struct mbuf *
m_getpackets(int num_needed, int num_with_pkthdrs, int how)
{
	struct mbuf *m;
	struct mbuf **np, *top;

	top = NULL;
	np = &top;

	m_clalloc(num_needed, how);	/* takes the MBUF_LOCK, but doesn't release it... */

	while (num_needed--) {
		if (mfree && mclfree) {	/* mbuf + cluster are available */
			m = mfree;
			MCHECK(m);
			mfree = m->m_next;
			++mclrefcnt[mtocl(m)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[MT_DATA]++;
			m->m_ext.ext_buf = (caddr_t)mclfree;	/* get the cluster */
			++mclrefcnt[mtocl(m->m_ext.ext_buf)];
			mbstat.m_clfree--;
			mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;

			m->m_next = m->m_nextpkt = 0;
			m->m_type = MT_DATA;
			m->m_data = m->m_ext.ext_buf;
			m->m_ext.ext_free = 0;
			m->m_ext.ext_size = MCLBYTES;
			m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;

			if (num_with_pkthdrs == 0)
				m->m_flags = M_EXT;
			else {
				m->m_flags = M_PKTHDR | M_EXT;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
				m->m_pkthdr.header = NULL;
				m->m_pkthdr.csum_flags = 0;
				m->m_pkthdr.csum_data = 0;
				m->m_pkthdr.aux = (struct mbuf *)NULL;
				m->m_pkthdr.reserved1 = NULL;
				m->m_pkthdr.reserved2 = NULL;

				num_with_pkthdrs--;
			}

		} else {

			MBUF_UNLOCK();

			if (num_with_pkthdrs == 0) {
				MGET(m, how, MT_DATA);
			} else {
				MGETHDR(m, how, MT_DATA);

				num_with_pkthdrs--;
			}
			if (m == 0)
				return(top);

			MCLGET(m, how);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				return(top);
			}
			MBUF_LOCK();
		}
		*np = m;

		if (num_with_pkthdrs)
			np = &m->m_nextpkt;
		else
			np = &m->m_next;
	}
	MBUF_UNLOCK();

	return (top);
}
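
/*
 * Illustrative sketch (not compiled): allocating a batch of
 * cluster-backed packets and walking the m_nextpkt chain.
 * "ex_batch_alloc" is a hypothetical name.
 */
#if 0
static int
ex_batch_alloc(void)
{
	struct mbuf *list, *m;
	int got = 0;

	/* ask for 8 packets, all with pkthdrs; may return fewer */
	list = m_getpackets(8, 8, M_DONTWAIT);
	for (m = list; m; m = m->m_nextpkt)
		got++;
	if (list)
		(void) m_freem_list(list);	/* see m_freem_list() below */
	return (got);
}
#endif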


/*
 * return a list of mbuf hdrs set up as packet hdrs
 * chained together on the m_nextpkt field
 */
struct mbuf *
m_getpackethdrs(int num_needed, int how)
{
	struct mbuf *m;
	struct mbuf **np, *top;

	top = NULL;
	np = &top;

	MBUF_LOCK();

	while (num_needed--) {
		if (m = mfree) {	/* mbufs are available */
			MCHECK(m);
			mfree = m->m_next;
			++mclrefcnt[mtocl(m)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[MT_DATA]++;

			m->m_next = m->m_nextpkt = 0;
			m->m_type = MT_DATA;
			m->m_flags = M_PKTHDR;
			m->m_data = m->m_pktdat;
			m->m_pkthdr.len = 0;
			m->m_pkthdr.rcvif = NULL;
			m->m_pkthdr.header = NULL;
			m->m_pkthdr.csum_flags = 0;
			m->m_pkthdr.csum_data = 0;
			m->m_pkthdr.aux = (struct mbuf *)NULL;
			m->m_pkthdr.reserved1 = NULL;
			m->m_pkthdr.reserved2 = NULL;

		} else {

			MBUF_UNLOCK();

			m = m_retryhdr(how, MT_DATA);

			if (m == 0)
				return(top);

			MBUF_LOCK();
		}
		*np = m;
		np = &m->m_nextpkt;
	}
	MBUF_UNLOCK();

	return (top);
}


/* free an mbuf list (m_nextpkt) while following m_next under one lock.
 * returns the count of packets freed.  Used by the drivers.
 */
int
m_freem_list(m)
	struct mbuf *m;
{
	struct mbuf *nextpkt;
	int i, count=0;

	MBUF_LOCK();

	while (m) {
		if (m)
			nextpkt = m->m_nextpkt;	/* chain of linked mbufs from driver */
		else
			nextpkt = 0;

		count++;

		while (m) {	/* free the mbuf chain (like mfreem) */

			struct mbuf *n;

			/* Free the aux data if there is any */
			if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) {
				/*
				 * Treat the current m as the nextpkt and set m
				 * to the aux data.  This lets us free the aux
				 * data in this loop without having to call
				 * m_freem recursively, which wouldn't work
				 * because we've still got the lock.
				 */
				nextpkt = m;
				m = nextpkt->m_pkthdr.aux;
				nextpkt->m_pkthdr.aux = NULL;
			}

			n = m->m_next;

			if (n && n->m_nextpkt)
				panic("m_freem_list: m_nextpkt of m_next != NULL");
			if (m->m_type == MT_FREE)
				panic("freeing free mbuf");

			if (m->m_flags & M_EXT) {
				if (MCLHASREFERENCE(m)) {
					remque((queue_t)&m->m_ext.ext_refs);
				} else if (m->m_ext.ext_free == NULL) {
					union mcluster *mcl= (union mcluster *)m->m_ext.ext_buf;
					if (_MCLUNREF(mcl)) {
						mcl->mcl_next = mclfree;
						mclfree = mcl;
						++mbstat.m_clfree;
					}
				} else {
					(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
					    m->m_ext.ext_size, m->m_ext.ext_arg);
				}
			}
			mbstat.m_mtypes[m->m_type]--;
			(void) _MCLUNREF(m);
			_MFREE_MUNGE(m);
			mbstat.m_mtypes[MT_FREE]++;
			m->m_type = MT_FREE;
			m->m_flags = 0;
			m->m_len = 0;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
		m = nextpkt;	/* bump m with saved nextpkt if any */
	}
	if (i = m_want)
		m_want = 0;

	MBUF_UNLOCK();

	if (i)
		wakeup((caddr_t)&mfree);

	return (count);
}
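
/*
 * Illustrative sketch (not compiled): a driver's transmit-completion
 * path handing a whole chain of finished packets back in one call.
 * "ex_txeof" and "done_chain" are hypothetical names.
 */
#if 0
static int
ex_txeof(struct mbuf *done_chain)
{
	/* frees every packet (m_nextpkt) and every mbuf within each
	   packet (m_next) under a single lock acquisition */
	return (m_freem_list(done_chain));
}
#endif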

void
m_freem(m)
	register struct mbuf *m;
{
	while (m)
		m = m_free(m);
}

/*
 * Mbuffer utility routines.
 */
/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 */
m_leadingspace(m)
	register struct mbuf *m;
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return(0);
		return (m->m_data - m->m_ext.ext_buf);
	}
	if (m->m_flags & M_PKTHDR)
		return (m->m_data - m->m_pktdat);
	return (m->m_data - m->m_dat);
}

/*
 * Compute the amount of space available
 * after the end of data in an mbuf.
 */
m_trailingspace(m)
	register struct mbuf *m;
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return(0);
		return (m->m_ext.ext_buf + m->m_ext.ext_size -
			(m->m_data + m->m_len));
	}
	return (&m->m_dat[MLEN] - (m->m_data + m->m_len));
}
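
/*
 * Illustrative sketch (not compiled): using m_trailingspace() to decide
 * whether data can be appended in place.  Both space routines report 0
 * for a shared cluster, which also guards against writing on data that
 * another mbuf can see.  "ex_try_append" is a hypothetical name.
 */
#if 0
static int
ex_try_append(struct mbuf *m, caddr_t cp, int len)
{
	if (m_trailingspace(m) < len)
		return (0);	/* caller must allocate another mbuf */
	bcopy(cp, mtod(m, caddr_t) + m->m_len, (unsigned)len);
	m->m_len += len;
	return (1);
}
#endif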

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 * Does not adjust packet header length.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Replacement for old M_PREPEND macro:
 * allocate new mbuf to prepend to chain,
 * copy junk along, and adjust length.
 */
struct mbuf *
m_prepend_2(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	if (M_LEADINGSPACE(m) >= len) {
		m->m_data -= len;
		m->m_len += len;
	} else {
		m = m_prepend(m, len, how);
	}
	if ((m) && (m->m_flags & M_PKTHDR))
		m->m_pkthdr.len += len;
	return (m);
}
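
/*
 * Illustrative sketch (not compiled): prepending a protocol header with
 * m_prepend_2().  "struct ex_hdr" and "ex_add_header" are hypothetical.
 */
#if 0
struct ex_hdr { u_int32_t tag; };

static struct mbuf *
ex_add_header(struct mbuf *m, u_int32_t tag)
{
	m = m_prepend_2(m, sizeof (struct ex_hdr), M_DONTWAIT);
	if (m == 0)
		return (0);	/* on failure the chain has been freed */
	mtod(m, struct ex_hdr *)->tag = tag;
	return (m);
}
#endif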

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;

	while (off >= m->m_len) {
		if (m == 0)
			panic("m_copym");
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;

	MBUF_LOCK();

	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		if (n = mfree) {
			MCHECK(n);
			++mclrefcnt[mtocl(n)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[m->m_type]++;
			mfree = n->m_next;
			n->m_next = n->m_nextpkt = 0;
			n->m_type = m->m_type;
			n->m_data = n->m_dat;
			n->m_flags = 0;
		} else {
			MBUF_UNLOCK();
			n = m_retry(wait, m->m_type);
			MBUF_LOCK();
		}
		*np = n;

		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		if (len == M_COPYALL) {
			if (min(len, (m->m_len - off)) == len) {
				printf("m->m_len %d - off %d = %d, %d\n",
				       m->m_len, off, m->m_len - off,
				       min(len, (m->m_len - off)));
			}
		}
		n->m_len = min(len, (m->m_len - off));
		if (n->m_len == M_COPYALL) {
			printf("n->m_len == M_COPYALL, fixing\n");
			n->m_len = MHLEN;
		}
		if (m->m_flags & M_EXT) {
			n->m_ext = m->m_ext;
			insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
			n->m_data = m->m_data + off;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	MBUF_UNLOCK();

	if (top == 0)
		MCFail++;

	return (top);
nospace:
	MBUF_UNLOCK();

	m_freem(top);
	MCFail++;
	return (0);
}
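
/*
 * Illustrative sketch (not compiled): taking a read-only copy of a
 * whole packet, e.g. for retransmission.  Cluster-backed data is
 * shared by reference; small data is copied.  "ex_clone_packet" is a
 * hypothetical name.
 */
#if 0
static struct mbuf *
ex_clone_packet(struct mbuf *pkt)
{
	return (m_copym(pkt, 0, M_COPYALL, M_DONTWAIT));
}
#endif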


/*
 * equivalent to m_copym except that all necessary mbuf hdrs are
 * allocated within this routine.  Also, the last mbuf and offset
 * accessed are passed out and can be passed back in to avoid having
 * to rescan the entire mbuf list (normally hung off of the socket).
 */
struct mbuf *
m_copym_with_hdrs(m, off0, len, wait, m_last, m_off)
	register struct mbuf *m;
	int off0, wait;
	register int len;
	struct mbuf **m_last;
	int *m_off;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top = 0;
	int copyhdr = 0;
	int type;

	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;

	if (*m_last) {
		m = *m_last;
		off = *m_off;
	} else {
		while (off >= m->m_len) {
			off -= m->m_len;
			m = m->m_next;
		}
	}
	MBUF_LOCK();

	while (len > 0) {
		if (top == 0)
			type = MT_HEADER;
		else {
			if (m == 0)
				panic("m_gethdr_and_copym");
			type = m->m_type;
		}
		if (n = mfree) {
			MCHECK(n);
			++mclrefcnt[mtocl(n)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[type]++;
			mfree = n->m_next;
			n->m_next = n->m_nextpkt = 0;
			n->m_type = type;

			if (top) {
				n->m_data = n->m_dat;
				n->m_flags = 0;
			} else {
				n->m_data = n->m_pktdat;
				n->m_flags = M_PKTHDR;
				n->m_pkthdr.len = 0;
				n->m_pkthdr.rcvif = NULL;
				n->m_pkthdr.header = NULL;
				n->m_pkthdr.csum_flags = 0;
				n->m_pkthdr.csum_data = 0;
				n->m_pkthdr.aux = (struct mbuf *)NULL;
				n->m_pkthdr.reserved1 = NULL;
				n->m_pkthdr.reserved2 = NULL;
			}
		} else {
			MBUF_UNLOCK();
			if (top)
				n = m_retry(wait, type);
			else
				n = m_retryhdr(wait, type);
			MBUF_LOCK();
		}
		if (n == 0)
			goto nospace;
		if (top == 0) {
			top = n;
			np = &top->m_next;
			continue;
		} else
			*np = n;

		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, (m->m_len - off));

		if (m->m_flags & M_EXT) {
			n->m_ext = m->m_ext;
			insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
			n->m_data = m->m_data + off;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;

		if (len == 0) {
			if ((off + n->m_len) == m->m_len) {
				*m_last = m->m_next;
				*m_off  = 0;
			} else {
				*m_last = m;
				*m_off  = off + n->m_len;
			}
			break;
		}
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	MBUF_UNLOCK();

	return (top);
nospace:
	MBUF_UNLOCK();

	if (top)
		m_freem(top);
	MCFail++;
	return (0);
}


/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
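
/*
 * Illustrative sketch (not compiled): gathering a header that may span
 * several mbufs into a flat buffer.  m_copydata() panics if the chain
 * is shorter than off + len, so the caller checks the length first.
 * "ex_peek" is a hypothetical name.
 */
#if 0
static void
ex_peek(struct mbuf *m, int hdrlen)
{
	char hdr[64];

	if (hdrlen > sizeof (hdr) ||
	    ((m->m_flags & M_PKTHDR) && m->m_pkthdr.len < hdrlen))
		return;
	m_copydata(m, 0, hdrlen, (caddr_t)hdr);
	/* ... parse hdr ... */
}
#endif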

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			m = mp;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}
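
/*
 * Illustrative sketch (not compiled): trimming an encapsulation header
 * from the front of a packet and a trailer from the end with m_adj().
 * "ex_strip" is a hypothetical name.
 */
#if 0
static void
ex_strip(struct mbuf *m, int hdrlen, int trailerlen)
{
	m_adj(m, hdrlen);	/* positive length: trim from head */
	m_adj(m, -trailerlen);	/* negative length: trim from tail */
}
#endif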

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
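
/*
 * Illustrative sketch (not compiled): the classic m_pullup() idiom used
 * before casting mtod() to a header structure.  "struct ex_hdr" and
 * "ex_input" are hypothetical names.
 */
#if 0
struct ex_hdr { u_int32_t tag; };

static void
ex_input(struct mbuf *m)
{
	struct ex_hdr *eh;

	if (m->m_len < sizeof (struct ex_hdr) &&
	    (m = m_pullup(m, sizeof (struct ex_hdr))) == 0)
		return;		/* chain was freed by m_pullup */
	eh = mtod(m, struct ex_hdr *);
	/* ... eh->tag is now safely addressable ... */
	m_freem(m);
}
#endif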

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		MBUF_LOCK();
		n->m_ext = m->m_ext;
		insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
		MBUF_UNLOCK();
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
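
/*
 * Illustrative sketch (not compiled): splitting an oversized packet.
 * "ex_fragment" is a hypothetical name.
 */
#if 0
static struct mbuf *
ex_fragment(struct mbuf *pkt, int mtu)
{
	struct mbuf *tail;

	tail = m_split(pkt, mtu, M_DONTWAIT);
	/* on success, pkt holds the first mtu bytes and tail the rest,
	   with both packet header lengths adjusted; on failure tail is
	   NULL and the original chain is, as far as possible, restored */
	return (tail);
}
#endif
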
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy)();
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else {
				/* give up when it's out of cluster mbufs */
				if (top)
					m_freem(top);
				m_freem(m);
				return (0);
			}
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Cluster freelist allocation check.  The mbuf lock must be held.
 * Ensure hysteresis between hi/lo.
 */
static int
m_howmany()
{
	register int i;

	/* Under minimum */
	if (mbstat.m_clusters < MINCL)
		return (MINCL - mbstat.m_clusters);
	/* Too few (free < 1/2 total) and not over maximum */
	if (mbstat.m_clusters < nmbclusters &&
	    (i = ((mbstat.m_clusters >> 1) - mbstat.m_clfree)) > 0)
		return i;
	return 0;
}


/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}


char *mcl_to_paddr(register char *addr) {
	register int base_phys;

	if (addr < (char *)mbutl || addr >= (char *)embutl)
		return (0);
	base_phys = mcl_paddr[(addr - (char *)mbutl) >> PAGE_SHIFT];

	if (base_phys == 0)
		return (0);
	return ((char *)((int)base_phys | ((int)addr & PAGE_MASK)));
}

/*
 * Dup the mbuf chain passed in.  The whole thing.  No cute additional cruft.
 * And really copy the thing.  That way, we don't "precompute" checksums
 * for unsuspecting consumers.
 * Assumption: m->m_nextpkt == 0.
 * Trick: for small packets, don't dup into a cluster.  That way received
 * packets don't take up too much room in the sockbuf (cf. sbspace()).
 */
int MDFail;

struct mbuf *
m_dup(register struct mbuf *m, int how)
{
	register struct mbuf *n, **np;
	struct mbuf *top;
	int copyhdr = 0;

	np = &top;
	top = 0;
	if (m->m_flags & M_PKTHDR)
		copyhdr = 1;

	/*
	 * Quick check: if we have one mbuf and its data fits in an
	 * mbuf with packet header, just copy and go.
	 */
	if (m->m_next == NULL) {
		/* Then just move the data into an mbuf and be done... */
		if (copyhdr) {
			if (m->m_pkthdr.len <= MHLEN) {
				if ((n = m_gethdr(how, m->m_type)) == NULL)
					return(NULL);
				n->m_len = m->m_len;
				n->m_flags |= (m->m_flags & M_COPYFLAGS);
				n->m_pkthdr.len = m->m_pkthdr.len;
				n->m_pkthdr.rcvif = m->m_pkthdr.rcvif;
				n->m_pkthdr.header = NULL;
				n->m_pkthdr.csum_flags = 0;
				n->m_pkthdr.csum_data = 0;
				n->m_pkthdr.aux = NULL;
				n->m_pkthdr.reserved1 = 0;
				n->m_pkthdr.reserved2 = 0;
				bcopy(m->m_data, n->m_data, m->m_pkthdr.len);
				return(n);
			}
		} else if (m->m_len <= MLEN) {
			if ((n = m_get(how, m->m_type)) == NULL)
				return(NULL);
			bcopy(m->m_data, n->m_data, m->m_len);
			n->m_len = m->m_len;
			return(n);
		}
	}
	while (m) {
#if BLUE_DEBUG
		kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
			m->m_data);
#endif
		if (copyhdr)
			n = m_gethdr(how, m->m_type);
		else
			n = m_get(how, m->m_type);
		if (n == 0)
			goto nospace;
		if (m->m_flags & M_EXT) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0)
				goto nospace;
		}
		*np = n;
		if (copyhdr) {
			/* Don't use M_COPY_PKTHDR: preserve m_data */
			n->m_pkthdr = m->m_pkthdr;
			n->m_flags |= (m->m_flags & M_COPYFLAGS);
			copyhdr = 0;
			if ((n->m_flags & M_EXT) == 0)
				n->m_data = n->m_pktdat;
		}
		n->m_len = m->m_len;
		/*
		 * Get the dup on the same bdry as the original
		 * Assume that the two mbufs have the same offset to data area
		 * (up to word bdries)
		 */
		bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len);
		m = m->m_next;
		np = &n->m_next;
#if BLUE_DEBUG
		kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
			n->m_data);
#endif
	}

	if (top == 0)
		MDFail++;
	return (top);
nospace:
	m_freem(top);
	MDFail++;
	return (0);
}

int
m_mclref(struct mbuf *p)
{
	return (_MCLREF(p));
}

int
m_mclunref(struct mbuf *p)
{
	return (_MCLUNREF(p));
}

/* change mbuf to new type */
void
m_mchtype(struct mbuf *m, int t)
{
	MBUF_LOCK();
	mbstat.m_mtypes[(m)->m_type]--;
	mbstat.m_mtypes[t]++;
	(m)->m_type = t;
	MBUF_UNLOCK();
}

void *m_mtod(struct mbuf *m)
{
	return ((m)->m_data);
}

struct mbuf *m_dtom(void *x)
{
	return ((struct mbuf *)((u_long)(x) & ~(MSIZE-1)));
}

int m_mtocl(void *x)
{
	return (((char *)(x) - (char *)mbutl) / sizeof(union mcluster));
}

union mcluster *m_cltom(int x)
{
	return ((union mcluster *)(mbutl + (x)));
}


void m_mcheck(struct mbuf *m)
{
	if (m->m_type != MT_FREE)
		panic("mget MCHECK: m_type=%x m=%x", m->m_type, m);
}

#if 0
#include <sys/sysctl.h>

static int mhog_num = 0;
static struct mbuf *mhog_chain = 0;
static int mhog_wait = 1;

static int
sysctl_mhog_num SYSCTL_HANDLER_ARGS
{
	int old = mhog_num;
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error && req->newptr) {
		int i;
		struct mbuf *m;

		if (mhog_chain) {
			m_freem(mhog_chain);
			mhog_chain = 0;
		}

		for (i = 0; i < mhog_num; i++) {
			MGETHDR(m, mhog_wait ? M_WAIT : M_DONTWAIT, MT_DATA);
			if (m == 0)
				break;

			MCLGET(m, mhog_wait ? M_WAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m = 0;
				break;
			}
			m->m_next = mhog_chain;
			mhog_chain = m;
		}
		mhog_num = i;
	}

	return error;
}

SYSCTL_NODE(_kern_ipc, OID_AUTO, mhog, CTLFLAG_RW, 0, "mbuf hog");

SYSCTL_PROC(_kern_ipc_mhog, OID_AUTO, cluster, CTLTYPE_INT|CTLFLAG_RW,
	    &mhog_num, 0, &sysctl_mhog_num, "I", "");
SYSCTL_INT(_kern_ipc_mhog, OID_AUTO, wait, CTLFLAG_RW, &mhog_wait,
	   0, "");
#endif