1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
 6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
26/*
27 * Copyright (c) 1982, 1986, 1988, 1991, 1993
28 * The Regents of the University of California. All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by the University of
41 * California, Berkeley and its contributors.
42 * 4. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
59 */
60/* HISTORY
61 *
62 * 10/15/97 Annette DeSchon (deschon@apple.com)
63 * Fixed bug in which all cluster mbufs were broken up
64 * into regular mbufs: Some clusters are now reserved.
65 * When a cluster is needed, regular mbufs are no longer
66 * used. (Radar 1683621)
67 * 20-May-95 Mac Gillon (mgillon) at NeXT
68 * New version based on 4.4
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/malloc.h>
74#include <sys/mbuf.h>
75#include <sys/kernel.h>
76#include <sys/syslog.h>
77#include <sys/protosw.h>
78#include <sys/domain.h>
79#include <net/netisr.h>
80
81#include <kern/queue.h>
82#include <kern/kern_types.h>
83#include <kern/sched_prim.h>
84
85#include <IOKit/IOMapper.h>
86
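/*
 * _MCLREF()/_MCLUNREF() bump and drop the per-cluster reference count
 * kept in mclrefcnt[] (indexed through mtocl()); _MCLUNREF() evaluates
 * true when the count reaches zero, i.e. when the cluster may be
 * returned to the free list.
 */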
87#define _MCLREF(p) (++mclrefcnt[mtocl(p)])
88#define _MCLUNREF(p) (--mclrefcnt[mtocl(p)] == 0)
 89
90extern pmap_t kernel_pmap; /* The kernel's pmap */
 91/* kernel translator */
92extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
93
94decl_simple_lock_data(, mbuf_slock);
95struct mbuf *mfree; /* mbuf free list */
96struct mbuf *mfreelater; /* mbuf deallocation list */
97extern vm_map_t mb_map; /* special map */
98int m_want; /* sleepers on mbufs */
99extern int nmbclusters; /* max number of mapped clusters */
100short *mclrefcnt; /* mapped cluster reference counts */
101int *mcl_paddr;
102static ppnum_t mcl_paddr_base; /* Handle returned by IOMapper::iovmAlloc() */
103union mcluster *mclfree; /* mapped cluster free list */
104int max_linkhdr; /* largest link-level header */
105int max_protohdr; /* largest protocol header */
106int max_hdr; /* largest link+protocol header */
107int max_datalen; /* MHLEN - max_hdr */
108struct mbstat mbstat; /* statistics */
109union mcluster *mbutl; /* first mapped cluster address */
110union mcluster *embutl; /* ending virtual address of mclusters */
111
112static int nclpp; /* # clusters per physical page */
113static char mbfail[] = "mbuf not mapped";
114
115static int m_howmany();
116
117/* The number of cluster mbufs that are allocated, to start. */
118#define MINCL max(16, 2)
119
120extern int dlil_input_thread_wakeup;
121extern int dlil_expand_mcl;
122extern int dlil_initialized;
123
124#if 0
125static int mfree_munge = 0;
126#if 0
127#define _MFREE_MUNGE(m) { \
128 if (mfree_munge) \
129 { int i; \
130 vm_offset_t *element = (vm_offset_t *)(m); \
131 for (i = 0; \
132 i < sizeof(struct mbuf)/sizeof(vm_offset_t); \
133 i++) \
134 (element)[i] = 0xdeadbeef; \
135 } \
136}
137#else
138void
139munge_mbuf(struct mbuf *m)
140{
141 int i;
142 vm_offset_t *element = (vm_offset_t *)(m);
143 for (i = 0;
144 i < sizeof(struct mbuf)/sizeof(vm_offset_t);
145 i++)
146 (element)[i] = 0xdeadbeef;
147}
148#define _MFREE_MUNGE(m) { \
149 if (mfree_munge) \
150 munge_mbuf(m); \
151}
152#endif
153#else
154#define _MFREE_MUNGE(m)
155#endif
156
157
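/*
 * _MINTGET() pops an mbuf off the head of the mfree list under
 * MBUF_LOCK, bumps the refcount slot for the cluster it lives in and
 * moves it from MT_FREE to the requested type in mbstat; it leaves
 * (m) == 0 when the free list is empty, and the callers below then
 * fall back to m_retry()/m_retryhdr().
 */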
158#define _MINTGET(m, type) { \
159 MBUF_LOCK(); \
160 if (((m) = mfree) != 0) { \
161 MCHECK(m); \
162 ++mclrefcnt[mtocl(m)]; \
163 mbstat.m_mtypes[MT_FREE]--; \
164 mbstat.m_mtypes[(type)]++; \
165 mfree = (m)->m_next; \
166 } \
167 MBUF_UNLOCK(); \
168}
169
170
171void
172mbinit()
173{
174 int s,m;
175 int initmcl = 32;
176	int mcl_pages;
177
178 if (nclpp)
179 return;
180	nclpp = round_page_32(MCLBYTES) / MCLBYTES;	/* see mbufgc() */
181 if (nclpp < 1) nclpp = 1;
182 MBUF_LOCKINIT();
183// NETISR_LOCKINIT();
184
185 mbstat.m_msize = MSIZE;
186 mbstat.m_mclbytes = MCLBYTES;
187 mbstat.m_minclsize = MINCLSIZE;
188 mbstat.m_mlen = MLEN;
189 mbstat.m_mhlen = MHLEN;
190
191 if (nmbclusters == 0)
192 nmbclusters = NMBCLUSTERS;
193 MALLOC(mclrefcnt, short *, nmbclusters * sizeof (short),
194 M_TEMP, M_WAITOK);
195 if (mclrefcnt == 0)
196 panic("mbinit");
197 for (m = 0; m < nmbclusters; m++)
198 mclrefcnt[m] = -1;
199
200 /* Calculate the number of pages assigned to the cluster pool */
201 mcl_pages = nmbclusters/(PAGE_SIZE/CLBYTES);
202 MALLOC(mcl_paddr, int *, mcl_pages * sizeof(int), M_TEMP, M_WAITOK);
203 if (mcl_paddr == 0)
204 panic("mbinit1");
205 /* Register with the I/O Bus mapper */
206 mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
207 bzero((char *)mcl_paddr, mcl_pages * sizeof(int));
208
209 embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES));
210
211 PE_parse_boot_arg("initmcl", &initmcl);
212
213 if (m_clalloc(max(PAGE_SIZE/CLBYTES, 1) * initmcl, M_WAIT) == 0)
214 goto bad;
215 MBUF_UNLOCK();
216 return;
217bad:
218 panic("mbinit");
219}
220
221/*
222 * Allocate some number of mbuf clusters
223 * and place on cluster free list.
224 */
225/* ARGSUSED */
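/*
 * Note: every path through m_clalloc() takes MBUF_LOCK() and returns
 * with it still held; callers such as mbinit() and m_mclalloc() issue
 * the matching MBUF_UNLOCK().
 */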
226m_clalloc(ncl, nowait)
227 register int ncl;
228 int nowait;
229{
230 register union mcluster *mcl;
231 register int i;
232 vm_size_t size;
233 static char doing_alloc;
234
235 /*
236 * Honor the caller's wish to block or not block.
237 * We have a way to grow the pool asynchronously,
238 * by kicking the dlil_input_thread.
239 */
240 if ((i = m_howmany()) <= 0)
241 goto out;
242
243 if ((nowait == M_DONTWAIT))
244 goto out;
245
246 if (ncl < i)
247 ncl = i;
248	size = round_page_32(ncl * MCLBYTES);
249 mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);
250
251 if (mcl == 0 && ncl > 1) {
252		size = round_page_32(MCLBYTES); /* Try for 1 if failed */
253 mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);
254 }
255
256 if (mcl) {
257 MBUF_LOCK();
258 ncl = size / MCLBYTES;
259 for (i = 0; i < ncl; i++) {
260 if (++mclrefcnt[mtocl(mcl)] != 0)
261 panic("m_clalloc already there");
262 if (((int)mcl & PAGE_MASK) == 0) {
263 ppnum_t offset = ((char *)mcl - (char *)mbutl)/PAGE_SIZE;
264 ppnum_t new_page = pmap_find_phys(kernel_pmap, (vm_address_t) mcl);
265
266			/*
267			 * If no mapper is available, the following call
268			 * is a no-op and returns the input page; if a
269			 * mapper is present, the appropriate I/O
270			 * page is returned.
271			 */
272 new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
273 mcl_paddr[offset] = new_page << 12;
274 }
275
276 mcl->mcl_next = mclfree;
277 mclfree = mcl++;
278 }
279 mbstat.m_clfree += ncl;
280 mbstat.m_clusters += ncl;
281 return (ncl);
282 } /* else ... */
283out:
284 MBUF_LOCK();
285
286 /*
287	 * When non-blocking, we kick the dlil thread if we have to grow the
288 * pool or if the number of free clusters is less than requested.
289 */
290 if ((nowait == M_DONTWAIT) && (i > 0 || ncl >= mbstat.m_clfree)) {
291 dlil_expand_mcl = 1;
292 if (dlil_initialized)
293 wakeup((caddr_t)&dlil_input_thread_wakeup);
294 }
295
296 if (mbstat.m_clfree >= ncl)
297 return 1;
298
299 mbstat.m_drops++;
300
301 return 0;
302}
303
304/*
305 * Add more free mbufs by cutting up a cluster.
306 */
307m_expand(canwait)
308 int canwait;
309{
310 register caddr_t mcl;
311
312 if (mbstat.m_clfree < (mbstat.m_clusters >> 4))
313 /* 1/16th of the total number of cluster mbufs allocated is
314 reserved for large packets. The number reserved must
315 always be < 1/2, or future allocation will be prevented.
316 */
317 return 0;
318
319 MCLALLOC(mcl, canwait);
320 if (mcl) {
321 register struct mbuf *m = (struct mbuf *)mcl;
322 register int i = NMBPCL;
323 MBUF_LOCK();
324 mbstat.m_mtypes[MT_FREE] += i;
325 mbstat.m_mbufs += i;
326 while (i--) {
327			_MFREE_MUNGE(m);
328 m->m_type = MT_FREE;
329 m->m_next = mfree;
330 mfree = m++;
331 }
332 i = m_want;
333 m_want = 0;
334 MBUF_UNLOCK();
335 if (i) wakeup((caddr_t)&mfree);
336 return 1;
337 }
338 return 0;
339}
340
341/*
342 * When MGET fails, ask protocols to free space when short of memory,
343 * then re-attempt to allocate an mbuf.
344 */
345struct mbuf *
346m_retry(canwait, type)
347 int canwait, type;
348{
349 register struct mbuf *m;
350 int wait, s;
351 funnel_t * fnl;
352 int fnl_switch = 0;
353 boolean_t funnel_state;
354
355 for (;;) {
356 (void) m_expand(canwait);
357 _MINTGET(m, type);
358 if (m) {
359 (m)->m_next = (m)->m_nextpkt = 0;
360 (m)->m_type = (type);
361 (m)->m_data = (m)->m_dat;
362 (m)->m_flags = 0;
363 }
364 if (m || canwait == M_DONTWAIT)
365 break;
366 MBUF_LOCK();
367 wait = m_want++;
368		dlil_expand_mcl = 1;
369 if (wait == 0)
370 mbstat.m_drain++;
371 else
372 mbstat.m_wait++;
373 MBUF_UNLOCK();
374
375 if (dlil_initialized)
376 wakeup((caddr_t)&dlil_input_thread_wakeup);
377
378 /*
379 * Grab network funnel because m_reclaim calls into the
380		 * socket domains and tsleep ends up calling splhigh
381 */
382 fnl = thread_funnel_get();
383 if (fnl && (fnl == kernel_flock)) {
384 fnl_switch = 1;
385 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
386 } else
387 funnel_state = thread_funnel_set(network_flock, TRUE);
388 if (wait == 0) {
389 m_reclaim();
390 } else {
391 /* Sleep with a small timeout as insurance */
392			(void) tsleep((caddr_t)&mfree, PZERO-1, "m_retry", hz);
393 }
394 if (fnl_switch)
395 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
396 else
397 thread_funnel_set(network_flock, funnel_state);
398 }
399 return (m);
400}
401
402/*
403 * As above; retry an MGETHDR.
404 */
405struct mbuf *
406m_retryhdr(canwait, type)
407 int canwait, type;
408{
409 register struct mbuf *m;
410
411 if (m = m_retry(canwait, type)) {
412 m->m_flags |= M_PKTHDR;
413 m->m_data = m->m_pktdat;
414 m->m_pkthdr.rcvif = NULL;
415 m->m_pkthdr.len = 0;
416 m->m_pkthdr.header = NULL;
417 m->m_pkthdr.csum_flags = 0;
418 m->m_pkthdr.csum_data = 0;
419 m->m_pkthdr.aux = (struct mbuf *)NULL;
420 m->m_pkthdr.reserved1 = NULL;
421 m->m_pkthdr.reserved2 = NULL;
422 }
423 return (m);
424}
425
426m_reclaim()
427{
428 register struct domain *dp;
429 register struct protosw *pr;
430
431 for (dp = domains; dp; dp = dp->dom_next)
432 for (pr = dp->dom_protosw; pr; pr = pr->pr_next)
433 if (pr->pr_drain)
434 (*pr->pr_drain)();
435 mbstat.m_drain++;
436}
437
438/*
439 * Space allocation routines.
440 * These are also available as macros
441 * for critical paths.
442 */
443struct mbuf *
444m_get(nowait, type)
445 int nowait, type;
446{
447 register struct mbuf *m;
448
449 _MINTGET(m, type);
450 if (m) {
451 m->m_next = m->m_nextpkt = 0;
452 m->m_type = type;
453 m->m_data = m->m_dat;
454 m->m_flags = 0;
455 } else
456 (m) = m_retry(nowait, type);
457
458 return (m);
459}
460
461struct mbuf *
462m_gethdr(nowait, type)
463 int nowait, type;
464{
465 register struct mbuf *m;
466
467 _MINTGET(m, type);
468 if (m) {
469 m->m_next = m->m_nextpkt = 0;
470 m->m_type = type;
471 m->m_data = m->m_pktdat;
472 m->m_flags = M_PKTHDR;
473 m->m_pkthdr.rcvif = NULL;
474 m->m_pkthdr.header = NULL;
475 m->m_pkthdr.csum_flags = 0;
476 m->m_pkthdr.csum_data = 0;
477 m->m_pkthdr.aux = (struct mbuf *)NULL;
478 m->m_pkthdr.reserved1 = NULL;
479 m->m_pkthdr.reserved2 = NULL;
480 } else
481 m = m_retryhdr(nowait, type);
482
483 return m;
484}
485
486struct mbuf *
487m_getclr(nowait, type)
488 int nowait, type;
489{
490 register struct mbuf *m;
491
492 MGET(m, nowait, type);
493 if (m == 0)
494 return (0);
495 bzero(mtod(m, caddr_t), MLEN);
496 return (m);
497}
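/*
 * Illustrative sketch (kept disabled, names are hypothetical): the
 * usual non-critical-path pairing of MGETHDR() with MCLGET() to build
 * a cluster-backed packet, released again with m_free()/m_freem().
 */
#if 0
static struct mbuf *
example_alloc_packet(int len)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	if (len > MHLEN) {
		MCLGET(m, M_DONTWAIT);		/* attach a cluster */
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);		/* no cluster available */
			return (0);
		}
	}
	m->m_len = m->m_pkthdr.len = len;
	return (m);
}
#endif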
498
499struct mbuf *
500m_free(m)
501 struct mbuf *m;
502{
503 struct mbuf *n = m->m_next;
504 int i, s;
505
506 if (m->m_type == MT_FREE)
507 panic("freeing free mbuf");
508
509 /* Free the aux data if there is any */
510 if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux)
511 {
512 m_freem(m->m_pkthdr.aux);
513 }
514
515	MBUF_LOCK();
516 if ((m->m_flags & M_EXT))
517 {
518 if (MCLHASREFERENCE(m)) {
519 remque((queue_t)&m->m_ext.ext_refs);
520 } else if (m->m_ext.ext_free == NULL) {
521 union mcluster *mcl= (union mcluster *)m->m_ext.ext_buf;
522			if (_MCLUNREF(mcl)) {
523 mcl->mcl_next = mclfree;
524 mclfree = mcl;
525 ++mbstat.m_clfree;
526 }
527#ifdef COMMENT_OUT
528/* *** Since m_split() increments "mclrefcnt[mtocl(m->m_ext.ext_buf)]",
529 and AppleTalk ADSP uses m_split(), this incorrect sanity check
530 caused a panic.
531*** */
532 else /* sanity check - not referenced this way */
533 panic("m_free m_ext cluster not free");
534#endif
535 } else {
536 (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
537 m->m_ext.ext_size, m->m_ext.ext_arg);
538 }
539 }
540 mbstat.m_mtypes[m->m_type]--;
541 (void) _MCLUNREF(m);
542 _MFREE_MUNGE(m);
543 m->m_type = MT_FREE;
544 mbstat.m_mtypes[m->m_type]++;
545 m->m_flags = 0;
546 m->m_next = mfree;
547 m->m_len = 0;
548 mfree = m;
549 i = m_want;
550 m_want = 0;
551 MBUF_UNLOCK();
552 if (i) wakeup((caddr_t)&mfree);
553 return (n);
554}
555
556/* m_mclget() adds an mbuf cluster to a normal mbuf */
557struct mbuf *
558m_mclget(m, nowait)
559 struct mbuf *m;
560 int nowait;
561{
562 MCLALLOC(m->m_ext.ext_buf, nowait);
563 if (m->m_ext.ext_buf) {
564 m->m_data = m->m_ext.ext_buf;
565 m->m_flags |= M_EXT;
566 m->m_ext.ext_size = MCLBYTES;
567 m->m_ext.ext_free = 0;
568 m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
569 &m->m_ext.ext_refs;
570 }
571
572 return m;
573}
574
575/* m_mclalloc() allocate an mbuf cluster */
576caddr_t
577m_mclalloc(nowait)
578 int nowait;
579{
580 caddr_t p;
581
582 (void)m_clalloc(1, nowait);
583 if ((p = (caddr_t)mclfree)) {
584 ++mclrefcnt[mtocl(p)];
585 mbstat.m_clfree--;
586 mclfree = ((union mcluster *)p)->mcl_next;
587 }
588 MBUF_UNLOCK();
589
590 return p;
591}
592
593/* m_mclfree() releases a reference to a cluster allocated by MCLALLOC,
594 * freeing the cluster if the reference count has reached 0. */
595void
596m_mclfree(p)
597 caddr_t p;
598{
599 MBUF_LOCK();
600 if (--mclrefcnt[mtocl(p)] == 0) {
601 ((union mcluster *)(p))->mcl_next = mclfree;
602 mclfree = (union mcluster *)(p);
603 mbstat.m_clfree++;
604 }
605 MBUF_UNLOCK();
606}
607
608/* m_mclhasreference() checks if a cluster of an mbuf is referenced by another mbuf */
609int
610m_mclhasreference(m)
611 struct mbuf *m;
612{
613 return (m->m_ext.ext_refs.forward != &(m->m_ext.ext_refs));
614}
615
616/* m_copy_pkthdr() copies the packet header from 'from' to 'to'; 'from' gives up its aux chain */
617void
618m_copy_pkthdr(to, from)
619 struct mbuf *to, *from;
620{
621 to->m_pkthdr = from->m_pkthdr;
622 from->m_pkthdr.aux = (struct mbuf *)NULL;
623 to->m_flags = from->m_flags & M_COPYFLAGS;
624 to->m_data = (to)->m_pktdat;
625}
626
627/* Best effort to get an mbuf cluster + pkthdr under one lock.
628 * If we don't have them available, just bail out and use the regular
629 * path.
630 * Used by drivers to allocate packets on the receive ring.
631 */
632struct mbuf *
633m_getpacket(void)
634{
635 struct mbuf *m;
636 m_clalloc(1, M_DONTWAIT); /* takes the MBUF_LOCK, but doesn't release it... */
637 if ((mfree != 0) && (mclfree != 0)) { /* mbuf + cluster are available */
638 m = mfree;
639 mfree = m->m_next;
640 MCHECK(m);
641 ++mclrefcnt[mtocl(m)];
642 mbstat.m_mtypes[MT_FREE]--;
643 mbstat.m_mtypes[MT_DATA]++;
644 m->m_ext.ext_buf = (caddr_t)mclfree; /* get the cluster */
645 ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
646 mbstat.m_clfree--;
647 mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
648
649 m->m_next = m->m_nextpkt = 0;
650 m->m_type = MT_DATA;
651 m->m_data = m->m_ext.ext_buf;
652 m->m_flags = M_PKTHDR | M_EXT;
653 m->m_pkthdr.len = 0;
654 m->m_pkthdr.rcvif = NULL;
655		m->m_pkthdr.header = NULL;
656 m->m_pkthdr.csum_data = 0;
657 m->m_pkthdr.csum_flags = 0;
658 m->m_pkthdr.aux = (struct mbuf *)NULL;
659 m->m_pkthdr.reserved1 = 0;
660 m->m_pkthdr.reserved2 = 0;
661		m->m_ext.ext_free = 0;
662 m->m_ext.ext_size = MCLBYTES;
663 m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
664 &m->m_ext.ext_refs;
665 MBUF_UNLOCK();
666 }
667	else { /* slow path: either mbuf or cluster needs to be allocated anyway */
668 MBUF_UNLOCK();
669
670 MGETHDR(m, M_WAITOK, MT_DATA );
671
672 if ( m == 0 )
673 return (NULL);
674
675 MCLGET( m, M_WAITOK );
676 if ( ( m->m_flags & M_EXT ) == 0 )
677 {
678 m_free(m); m = 0;
679 }
680 }
681 return (m);
682}
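/*
 * Illustrative use (hypothetical driver code): when replenishing a
 * receive ring, a driver typically asks for one cluster-backed packet
 * per empty descriptor and stops early if the pool is exhausted:
 *
 *	if ((m = m_getpacket()) == NULL)
 *		break;
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *	rx_desc[i].mbuf = m;		(rx_desc is hypothetical)
 */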
683
684
685/*
686 * return a list of mbuf hdrs that point to clusters...
687 * try for num_needed, if this can't be met, return whatever
688 * number was available... set up the first num_with_pkthdrs
689 * with mbuf hdrs configured as packet headers... these are
690 * chained on the m_nextpkt field... any packets requested beyond
691 * this are chained onto the last packet header's m_next field.
692 */
693struct mbuf *
694m_getpackets(int num_needed, int num_with_pkthdrs, int how)
695{
696 struct mbuf *m;
697 struct mbuf **np, *top;
698
699 top = NULL;
700 np = &top;
701
702 m_clalloc(num_needed, how); /* takes the MBUF_LOCK, but doesn't release it... */
703
704 while (num_needed--) {
705 if (mfree && mclfree) { /* mbuf + cluster are available */
706 m = mfree;
707 MCHECK(m);
708 mfree = m->m_next;
709 ++mclrefcnt[mtocl(m)];
710 mbstat.m_mtypes[MT_FREE]--;
711 mbstat.m_mtypes[MT_DATA]++;
712 m->m_ext.ext_buf = (caddr_t)mclfree; /* get the cluster */
713 ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
714 mbstat.m_clfree--;
715 mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
716
717 m->m_next = m->m_nextpkt = 0;
718 m->m_type = MT_DATA;
719 m->m_data = m->m_ext.ext_buf;
720 m->m_ext.ext_free = 0;
721 m->m_ext.ext_size = MCLBYTES;
722 m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
723
724 if (num_with_pkthdrs == 0)
725 m->m_flags = M_EXT;
726 else {
727 m->m_flags = M_PKTHDR | M_EXT;
728 m->m_pkthdr.len = 0;
729 m->m_pkthdr.rcvif = NULL;
730 m->m_pkthdr.header = NULL;
731 m->m_pkthdr.csum_flags = 0;
732 m->m_pkthdr.csum_data = 0;
733 m->m_pkthdr.aux = (struct mbuf *)NULL;
734 m->m_pkthdr.reserved1 = NULL;
735 m->m_pkthdr.reserved2 = NULL;
736
737 num_with_pkthdrs--;
738 }
739
740 } else {
741
742 MBUF_UNLOCK();
743
744 if (num_with_pkthdrs == 0) {
745 MGET(m, how, MT_DATA );
746 } else {
747 MGETHDR(m, how, MT_DATA);
748
749 num_with_pkthdrs--;
750 }
751 if (m == 0)
752 return(top);
753
754 MCLGET(m, how);
755 if ((m->m_flags & M_EXT) == 0) {
756 m_free(m);
757 return(top);
758 }
759 MBUF_LOCK();
760 }
761 *np = m;
762
763 if (num_with_pkthdrs)
764 np = &m->m_nextpkt;
765 else
766 np = &m->m_next;
767 }
768 MBUF_UNLOCK();
769
770 return (top);
771}
772
773
774/*
775 * return a list of mbuf hdrs set up as packet hdrs
776 * chained together on the m_nextpkt field
777 */
778struct mbuf *
779m_getpackethdrs(int num_needed, int how)
780{
781 struct mbuf *m;
782 struct mbuf **np, *top;
783
784 top = NULL;
785 np = &top;
786
787 MBUF_LOCK();
788
789 while (num_needed--) {
790 if (m = mfree) { /* mbufs are available */
791 MCHECK(m);
792 mfree = m->m_next;
793 ++mclrefcnt[mtocl(m)];
794 mbstat.m_mtypes[MT_FREE]--;
795 mbstat.m_mtypes[MT_DATA]++;
796
797 m->m_next = m->m_nextpkt = 0;
798 m->m_type = MT_DATA;
799 m->m_flags = M_PKTHDR;
800 m->m_data = m->m_pktdat;
801 m->m_pkthdr.len = 0;
802 m->m_pkthdr.rcvif = NULL;
803 m->m_pkthdr.header = NULL;
804 m->m_pkthdr.csum_flags = 0;
805 m->m_pkthdr.csum_data = 0;
806 m->m_pkthdr.aux = (struct mbuf *)NULL;
807 m->m_pkthdr.reserved1 = NULL;
808 m->m_pkthdr.reserved2 = NULL;
809
810 } else {
811
812 MBUF_UNLOCK();
813
814 m = m_retryhdr(how, MT_DATA);
815
816 if (m == 0)
817 return(top);
818
819 MBUF_LOCK();
820 }
821 *np = m;
822 np = &m->m_nextpkt;
823 }
824 MBUF_UNLOCK();
825
826 return (top);
827}
828
829
830/* free an mbuf list (m_nextpkt) while following m_next under one lock.
831 * returns the count of packets freed. Used by the drivers.
832 */
833int
834m_freem_list(m)
835 struct mbuf *m;
836{
837 struct mbuf *nextpkt;
838	int i, count=0;
839
840	MBUF_LOCK();
841
842 while (m) {
843 if (m)
844			nextpkt = m->m_nextpkt; /* chain of linked mbufs from driver */
845		else
846			nextpkt = 0;
847
848		count++;
849
850		while (m) { /* free the mbuf chain (like mfreem) */
851
852 struct mbuf *n;
853
854 /* Free the aux data if there is any */
855 if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) {
856 /*
857 * Treat the current m as the nextpkt and set m
858 * to the aux data. This lets us free the aux
859 * data in this loop without having to call
860 * m_freem recursively, which wouldn't work
861 * because we've still got the lock.
862 */
863 nextpkt = m;
864 m = nextpkt->m_pkthdr.aux;
865 nextpkt->m_pkthdr.aux = NULL;
866 }
867
868 n = m->m_next;
869
870 if (n && n->m_nextpkt)
871 panic("m_freem_list: m_nextpkt of m_next != NULL");
872 if (m->m_type == MT_FREE)
873 panic("freeing free mbuf");
874
875 if (m->m_flags & M_EXT) {
876 if (MCLHASREFERENCE(m)) {
877 remque((queue_t)&m->m_ext.ext_refs);
878 } else if (m->m_ext.ext_free == NULL) {
879 union mcluster *mcl= (union mcluster *)m->m_ext.ext_buf;
880					if (_MCLUNREF(mcl)) {
881 mcl->mcl_next = mclfree;
882 mclfree = mcl;
883 ++mbstat.m_clfree;
884 }
885 } else {
886 (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
887 m->m_ext.ext_size, m->m_ext.ext_arg);
888 }
889 }
890 mbstat.m_mtypes[m->m_type]--;
891 (void) _MCLUNREF(m);
892 _MFREE_MUNGE(m);
893			mbstat.m_mtypes[MT_FREE]++;
894			m->m_type = MT_FREE;
895 m->m_flags = 0;
896 m->m_len = 0;
897 m->m_next = mfree;
898 mfree = m;
899 m = n;
900 }
901 m = nextpkt; /* bump m with saved nextpkt if any */
902 }
903 if (i = m_want)
904 m_want = 0;
905
906	MBUF_UNLOCK();
907
908 if (i)
909 wakeup((caddr_t)&mfree);
910
911 return (count);
912}
913
914void
915m_freem(m)
916 register struct mbuf *m;
917{
918 while (m)
919 m = m_free(m);
920}
921
922/*
923 * Mbuffer utility routines.
924 */
925/*
926 * Compute the amount of space available
927 * before the current start of data in an mbuf.
928 */
929m_leadingspace(m)
930register struct mbuf *m;
931{
932 if (m->m_flags & M_EXT) {
933 if (MCLHASREFERENCE(m))
934 return(0);
935 return (m->m_data - m->m_ext.ext_buf);
936 }
937 if (m->m_flags & M_PKTHDR)
938 return (m->m_data - m->m_pktdat);
939 return (m->m_data - m->m_dat);
940}
941
942/*
943 * Compute the amount of space available
944 * after the end of data in an mbuf.
945 */
946m_trailingspace(m)
947register struct mbuf *m;
948{
949 if (m->m_flags & M_EXT) {
950 if (MCLHASREFERENCE(m))
951 return(0);
952 return (m->m_ext.ext_buf + m->m_ext.ext_size -
953 (m->m_data + m->m_len));
954 }
955 return (&m->m_dat[MLEN] - (m->m_data + m->m_len));
956}
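/*
 * Note that both routines report zero available space when the mbuf's
 * cluster is shared (MCLHASREFERENCE), so callers never grow data in
 * place over bytes that another chain still references.
 */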
957
958/*
959 * Lesser-used path for M_PREPEND:
960 * allocate new mbuf to prepend to chain,
961 * copy junk along.
962 * Does not adjust packet header length.
963 */
964struct mbuf *
965m_prepend(m, len, how)
966 register struct mbuf *m;
967 int len, how;
968{
969 struct mbuf *mn;
970
971 MGET(mn, how, m->m_type);
972 if (mn == (struct mbuf *)NULL) {
973 m_freem(m);
974 return ((struct mbuf *)NULL);
975 }
976 if (m->m_flags & M_PKTHDR) {
977 M_COPY_PKTHDR(mn, m);
978 m->m_flags &= ~M_PKTHDR;
979 }
980 mn->m_next = m;
981 m = mn;
982 if (len < MHLEN)
983 MH_ALIGN(m, len);
984 m->m_len = len;
985 return (m);
986}
987
988/*
989 * Replacement for old M_PREPEND macro:
990 * allocate new mbuf to prepend to chain,
991 * copy junk along, and adjust length.
992 *
993 */
994struct mbuf *
995m_prepend_2(m, len, how)
996 register struct mbuf *m;
997 int len, how;
998{
999 if (M_LEADINGSPACE(m) >= len) {
1000 m->m_data -= len;
1001 m->m_len += len;
1002 } else {
1003 m = m_prepend(m, len, how);
1004 }
1005 if ((m) && (m->m_flags & M_PKTHDR))
1006 m->m_pkthdr.len += len;
1007 return (m);
1008}
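/*
 * Illustrative sketch (hypothetical 8-byte encapsulation header):
 * m_prepend_2() grows the front of the chain, reusing leading space
 * when it can, and keeps m_pkthdr.len consistent.
 */
#if 0
static struct mbuf *
example_encap(struct mbuf *m)
{
	m = m_prepend_2(m, 8, M_DONTWAIT);
	if (m == 0)
		return (0);		/* original chain already freed */
	bzero(mtod(m, caddr_t), 8);	/* fill in the new header */
	return (m);
}
#endif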
1009
1010/*
1011 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1012 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
1013 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
1014 */
1015int MCFail;
1016
1017struct mbuf *
1018m_copym(m, off0, len, wait)
1019 register struct mbuf *m;
1020 int off0, wait;
1021 register int len;
1022{
1023 register struct mbuf *n, **np;
1024 register int off = off0;
1025 struct mbuf *top;
1026 int copyhdr = 0;
1027
1028 if (off < 0 || len < 0)
1029 panic("m_copym");
1030 if (off == 0 && m->m_flags & M_PKTHDR)
1031 copyhdr = 1;
1032
1033 while (off >= m->m_len) {
1034 if (m == 0)
1035 panic("m_copym");
1036 off -= m->m_len;
1037 m = m->m_next;
1038 }
1039 np = &top;
1040 top = 0;
1041
1042 MBUF_LOCK();
1043
1044 while (len > 0) {
1045 if (m == 0) {
1046 if (len != M_COPYALL)
1047 panic("m_copym");
1048 break;
1049 }
1050 if (n = mfree) {
1051 MCHECK(n);
1052 ++mclrefcnt[mtocl(n)];
1053 mbstat.m_mtypes[MT_FREE]--;
1054 mbstat.m_mtypes[m->m_type]++;
1055 mfree = n->m_next;
1056 n->m_next = n->m_nextpkt = 0;
1057 n->m_type = m->m_type;
1058 n->m_data = n->m_dat;
1059 n->m_flags = 0;
1060 } else {
1061 MBUF_UNLOCK();
1062 n = m_retry(wait, m->m_type);
1063 MBUF_LOCK();
1064 }
1065		*np = n;
1066
1067 if (n == 0)
1068 goto nospace;
1069 if (copyhdr) {
1070 M_COPY_PKTHDR(n, m);
1071 if (len == M_COPYALL)
1072 n->m_pkthdr.len -= off0;
1073 else
1074 n->m_pkthdr.len = len;
1075 copyhdr = 0;
1076 }
1077 if (len == M_COPYALL) {
1078 if (min(len, (m->m_len - off)) == len) {
1079 printf("m->m_len %d - off %d = %d, %d\n",
1080 m->m_len, off, m->m_len - off,
1081 min(len, (m->m_len - off)));
1082 }
1083 }
1084 n->m_len = min(len, (m->m_len - off));
1085 if (n->m_len == M_COPYALL) {
1086 printf("n->m_len == M_COPYALL, fixing\n");
1087 n->m_len = MHLEN;
1088 }
1089 if (m->m_flags & M_EXT) {
1090 n->m_ext = m->m_ext;
1091 insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
1092 n->m_data = m->m_data + off;
1093 n->m_flags |= M_EXT;
1094		} else {
1095 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1096 (unsigned)n->m_len);
1097		}
1098 if (len != M_COPYALL)
1099 len -= n->m_len;
1100 off = 0;
1101 m = m->m_next;
1102 np = &n->m_next;
1103 }
1104 MBUF_UNLOCK();
1105
1106 if (top == 0)
1107 MCFail++;
1108
1109 return (top);
1110nospace:
1111 MBUF_UNLOCK();
1112
1113 m_freem(top);
1114 MCFail++;
1115 return (0);
1116}
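/*
 * Illustrative sketch: a caller that must keep a reference to a whole
 * chain (e.g. for possible retransmission) copies it with M_COPYALL;
 * cluster-backed data is not duplicated, the copy simply takes another
 * reference on each cluster via the ext_refs queue.
 */
#if 0
static struct mbuf *
example_copy_whole_chain(struct mbuf *m)
{
	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}
#endif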
1117
1118
1119/*
1120 * equivalent to m_copym except that all necessary
1121 * mbuf hdrs are allocated within this routine
1122 * also, the last mbuf and offset accessed are passed
1123 * out and can be passed back in to avoid having to
1124 * rescan the entire mbuf list (normally hung off of the socket)
1125 */
1126struct mbuf *
1127m_copym_with_hdrs(m, off0, len, wait, m_last, m_off)
1128 register struct mbuf *m;
1129 int off0, wait;
1130 register int len;
1131 struct mbuf **m_last;
1132 int *m_off;
1133{
1134 register struct mbuf *n, **np;
1135 register int off = off0;
1136 struct mbuf *top = 0;
1137 int copyhdr = 0;
1138 int type;
1139
1140 if (off == 0 && m->m_flags & M_PKTHDR)
1141 copyhdr = 1;
1142
1143 if (*m_last) {
1144 m = *m_last;
1145 off = *m_off;
1146 } else {
1147 while (off >= m->m_len) {
1148 off -= m->m_len;
1149 m = m->m_next;
1150 }
1151 }
1152 MBUF_LOCK();
1153
1154 while (len > 0) {
1155 if (top == 0)
1156 type = MT_HEADER;
1157 else {
1158 if (m == 0)
1159 panic("m_gethdr_and_copym");
1160 type = m->m_type;
1161 }
1162 if (n = mfree) {
1163 MCHECK(n);
1164 ++mclrefcnt[mtocl(n)];
1165 mbstat.m_mtypes[MT_FREE]--;
1166 mbstat.m_mtypes[type]++;
1167 mfree = n->m_next;
1168 n->m_next = n->m_nextpkt = 0;
1169 n->m_type = type;
1170
1171 if (top) {
1172 n->m_data = n->m_dat;
1173 n->m_flags = 0;
1174 } else {
1175 n->m_data = n->m_pktdat;
1176 n->m_flags = M_PKTHDR;
1177 n->m_pkthdr.len = 0;
1178 n->m_pkthdr.rcvif = NULL;
1179 n->m_pkthdr.header = NULL;
1180 n->m_pkthdr.csum_flags = 0;
1181 n->m_pkthdr.csum_data = 0;
1182 n->m_pkthdr.aux = (struct mbuf *)NULL;
1183 n->m_pkthdr.reserved1 = NULL;
1184 n->m_pkthdr.reserved2 = NULL;
1185 }
1186 } else {
1187 MBUF_UNLOCK();
1188 if (top)
1189 n = m_retry(wait, type);
1190 else
1191 n = m_retryhdr(wait, type);
1192 MBUF_LOCK();
1193 }
1194 if (n == 0)
1195 goto nospace;
1196 if (top == 0) {
1197 top = n;
1198 np = &top->m_next;
1199 continue;
1200 } else
1201 *np = n;
1202
1203 if (copyhdr) {
1204 M_COPY_PKTHDR(n, m);
1205 n->m_pkthdr.len = len;
1206 copyhdr = 0;
1207 }
1208 n->m_len = min(len, (m->m_len - off));
1209
1210 if (m->m_flags & M_EXT) {
1211 n->m_ext = m->m_ext;
1212 insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
1213 n->m_data = m->m_data + off;
1214 n->m_flags |= M_EXT;
1215 } else {
1216 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1217 (unsigned)n->m_len);
1218 }
1219 len -= n->m_len;
1220
1221 if (len == 0) {
1222 if ((off + n->m_len) == m->m_len) {
1223 *m_last = m->m_next;
1224 *m_off = 0;
1225 } else {
1226 *m_last = m;
1227 *m_off = off + n->m_len;
1228 }
1229 break;
1230 }
1231 off = 0;
1232 m = m->m_next;
1233 np = &n->m_next;
1234 }
1235 MBUF_UNLOCK();
1236
1237 return (top);
1238nospace:
1239 MBUF_UNLOCK();
1240
1241 if (top)
1242 m_freem(top);
1243 MCFail++;
1244 return (0);
1245}
1246
1247
1248/*
1249 * Copy data from an mbuf chain starting "off" bytes from the beginning,
1250 * continuing for "len" bytes, into the indicated buffer.
1251 */
1252void m_copydata(m, off, len, cp)
1253 register struct mbuf *m;
1254 register int off;
1255 register int len;
1256 caddr_t cp;
1257{
1258 register unsigned count;
1259
1260 if (off < 0 || len < 0)
1261 panic("m_copydata");
1262 while (off > 0) {
1263 if (m == 0)
1264 panic("m_copydata");
1265 if (off < m->m_len)
1266 break;
1267 off -= m->m_len;
1268 m = m->m_next;
1269 }
1270 while (len > 0) {
1271 if (m == 0)
1272 panic("m_copydata");
1273 count = min(m->m_len - off, len);
1274 bcopy(mtod(m, caddr_t) + off, cp, count);
1275 len -= count;
1276 cp += count;
1277 off = 0;
1278 m = m->m_next;
1279 }
1280}
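/*
 * Illustrative sketch (hypothetical sizes): peek at the first bytes of
 * a packet without restructuring the chain (contrast with m_pullup()).
 */
#if 0
static int
example_peek_bytes(struct mbuf *m, caddr_t buf, int buflen)
{
	if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < buflen)
		return (0);		/* not enough data in the chain */
	m_copydata(m, 0, buflen, buf);
	return (1);
}
#endif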
1281
1282/*
1283 * Concatenate mbuf chain n to m.
1284 * Both chains must be of the same type (e.g. MT_DATA).
1285 * Any m_pkthdr is not updated.
1286 */
1287void m_cat(m, n)
1288 register struct mbuf *m, *n;
1289{
1290 while (m->m_next)
1291 m = m->m_next;
1292 while (n) {
1293 if (m->m_flags & M_EXT ||
1294 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1295 /* just join the two chains */
1296 m->m_next = n;
1297 return;
1298 }
1299 /* splat the data from one into the other */
1300 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1301 (u_int)n->m_len);
1302 m->m_len += n->m_len;
1303 n = m_free(n);
1304 }
1305}
1306
1307void
1308m_adj(mp, req_len)
1309 struct mbuf *mp;
1310 int req_len;
1311{
1312 register int len = req_len;
1313 register struct mbuf *m;
1314 register count;
1315
1316 if ((m = mp) == NULL)
1317 return;
1318 if (len >= 0) {
1319 /*
1320 * Trim from head.
1321 */
1322 while (m != NULL && len > 0) {
1323 if (m->m_len <= len) {
1324 len -= m->m_len;
1325 m->m_len = 0;
1326 m = m->m_next;
1327 } else {
1328 m->m_len -= len;
1329 m->m_data += len;
1330 len = 0;
1331 }
1332 }
1333 m = mp;
1334 if (m->m_flags & M_PKTHDR)
1335 m->m_pkthdr.len -= (req_len - len);
1336 } else {
1337 /*
1338 * Trim from tail. Scan the mbuf chain,
1339 * calculating its length and finding the last mbuf.
1340 * If the adjustment only affects this mbuf, then just
1341 * adjust and return. Otherwise, rescan and truncate
1342 * after the remaining size.
1343 */
1344 len = -len;
1345 count = 0;
1346 for (;;) {
1347 count += m->m_len;
1348 if (m->m_next == (struct mbuf *)0)
1349 break;
1350 m = m->m_next;
1351 }
1352 if (m->m_len >= len) {
1353 m->m_len -= len;
1354 m = mp;
1355 if (m->m_flags & M_PKTHDR)
1356 m->m_pkthdr.len -= len;
1357 return;
1358 }
1359 count -= len;
1360 if (count < 0)
1361 count = 0;
1362 /*
1363 * Correct length for chain is "count".
1364 * Find the mbuf with last data, adjust its length,
1365 * and toss data from remaining mbufs on chain.
1366 */
1367 m = mp;
1368 if (m->m_flags & M_PKTHDR)
1369 m->m_pkthdr.len = count;
1370 for (; m; m = m->m_next) {
1371 if (m->m_len >= count) {
1372 m->m_len = count;
1373 break;
1374 }
1375 count -= m->m_len;
1376 }
1377 while (m = m->m_next)
1378 m->m_len = 0;
1379 }
1380}
1381
1382/*
1383 * Rearrange an mbuf chain so that len bytes are contiguous
1384 * and in the data area of an mbuf (so that mtod and dtom
1385 * will work for a structure of size len). Returns the resulting
1386 * mbuf chain on success, frees it and returns null on failure.
1387 * If there is room, it will add up to max_protohdr-len extra bytes to the
1388 * contiguous region in an attempt to avoid being called next time.
1389 */
1390int MPFail;
1391
1392struct mbuf *
1393m_pullup(n, len)
1394 register struct mbuf *n;
1395 int len;
1396{
1397 register struct mbuf *m;
1398 register int count;
1399 int space;
1400
1401 /*
1402 * If first mbuf has no cluster, and has room for len bytes
1403 * without shifting current data, pullup into it,
1404 * otherwise allocate a new mbuf to prepend to the chain.
1405 */
1406 if ((n->m_flags & M_EXT) == 0 &&
1407 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
1408 if (n->m_len >= len)
1409 return (n);
1410 m = n;
1411 n = n->m_next;
1412 len -= m->m_len;
1413 } else {
1414 if (len > MHLEN)
1415 goto bad;
1416 MGET(m, M_DONTWAIT, n->m_type);
1417 if (m == 0)
1418 goto bad;
1419 m->m_len = 0;
1420 if (n->m_flags & M_PKTHDR) {
1421 M_COPY_PKTHDR(m, n);
1422 n->m_flags &= ~M_PKTHDR;
1423 }
1424 }
1425 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1426 do {
1427 count = min(min(max(len, max_protohdr), space), n->m_len);
1428 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1429 (unsigned)count);
1430 len -= count;
1431 m->m_len += count;
1432 n->m_len -= count;
1433 space -= count;
1434 if (n->m_len)
1435 n->m_data += count;
1436 else
1437 n = m_free(n);
1438 } while (len > 0 && n);
1439 if (len > 0) {
1440 (void) m_free(m);
1441 goto bad;
1442 }
1443 m->m_next = n;
1444 return (m);
1445bad:
1446 m_freem(n);
1447 MPFail++;
1448 return (0);
1449}
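/*
 * Illustrative sketch: the canonical m_pullup() pattern used by
 * protocol input paths -- guarantee 'hdrlen' contiguous bytes in the
 * first mbuf before overlaying a header structure with mtod().
 */
#if 0
static struct mbuf *
example_ensure_contiguous(struct mbuf *m, int hdrlen)
{
	if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == 0)
		return (0);	/* m_pullup() freed the chain */
	/* mtod(m, caddr_t) now points at hdrlen contiguous bytes */
	return (m);
}
#endif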
1450
1451/*
1452 * Partition an mbuf chain in two pieces, returning the tail --
1453 * all but the first len0 bytes. In case of failure, it returns NULL and
1454 * attempts to restore the chain to its original state.
1455 */
1456struct mbuf *
1457m_split(m0, len0, wait)
1458 register struct mbuf *m0;
1459 int len0, wait;
1460{
1461 register struct mbuf *m, *n;
1462 unsigned len = len0, remain;
1463
1464 for (m = m0; m && len > m->m_len; m = m->m_next)
1465 len -= m->m_len;
1466 if (m == 0)
1467 return (0);
1468 remain = m->m_len - len;
1469 if (m0->m_flags & M_PKTHDR) {
1470 MGETHDR(n, wait, m0->m_type);
1471 if (n == 0)
1472 return (0);
1473 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1474 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1475 m0->m_pkthdr.len = len0;
1476 if (m->m_flags & M_EXT)
1477 goto extpacket;
1478 if (remain > MHLEN) {
1479 /* m can't be the lead packet */
1480 MH_ALIGN(n, 0);
1481 n->m_next = m_split(m, len, wait);
1482 if (n->m_next == 0) {
1483 (void) m_free(n);
1484 return (0);
1485 } else
1486 return (n);
1487 } else
1488 MH_ALIGN(n, remain);
1489 } else if (remain == 0) {
1490 n = m->m_next;
1491 m->m_next = 0;
1492 return (n);
1493 } else {
1494 MGET(n, wait, m->m_type);
1495 if (n == 0)
1496 return (0);
1497 M_ALIGN(n, remain);
1498 }
1499extpacket:
1500 if (m->m_flags & M_EXT) {
1501 n->m_flags |= M_EXT;
1502		MBUF_LOCK();
1503 n->m_ext = m->m_ext;
1504 insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
1505		MBUF_UNLOCK();
1506 n->m_data = m->m_data + len;
1507 } else {
1508 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1509 }
1510 n->m_len = remain;
1511 m->m_len = len;
1512 n->m_next = m->m_next;
1513 m->m_next = 0;
1514 return (n);
1515}
1516/*
1517 * Routine to copy from device local memory into mbufs.
1518 */
1519struct mbuf *
1520m_devget(buf, totlen, off0, ifp, copy)
1521 char *buf;
1522 int totlen, off0;
1523 struct ifnet *ifp;
1524 void (*copy)();
1525{
1526 register struct mbuf *m;
1527 struct mbuf *top = 0, **mp = &top;
1528 register int off = off0, len;
1529 register char *cp;
1530 char *epkt;
1531
1532 cp = buf;
1533 epkt = cp + totlen;
1534 if (off) {
1535 /*
1536 * If 'off' is non-zero, packet is trailer-encapsulated,
1537 * so we have to skip the type and length fields.
1538 */
1539 cp += off + 2 * sizeof(u_int16_t);
1540 totlen -= 2 * sizeof(u_int16_t);
1541 }
1542 MGETHDR(m, M_DONTWAIT, MT_DATA);
1543 if (m == 0)
1544 return (0);
1545 m->m_pkthdr.rcvif = ifp;
1546 m->m_pkthdr.len = totlen;
1547 m->m_len = MHLEN;
1548
1549 while (totlen > 0) {
1550 if (top) {
1551 MGET(m, M_DONTWAIT, MT_DATA);
1552 if (m == 0) {
1553 m_freem(top);
1554 return (0);
1555 }
1556 m->m_len = MLEN;
1557 }
1558 len = min(totlen, epkt - cp);
1559 if (len >= MINCLSIZE) {
1560 MCLGET(m, M_DONTWAIT);
1561 if (m->m_flags & M_EXT)
1562 m->m_len = len = min(len, MCLBYTES);
1563 else {
1564 /* give up when it's out of cluster mbufs */
1565 if (top)
1566 m_freem(top);
1567 m_freem(m);
1568 return (0);
1569 }
1570 } else {
1571 /*
1572 * Place initial small packet/header at end of mbuf.
1573 */
1574 if (len < m->m_len) {
1575 if (top == 0 && len + max_linkhdr <= m->m_len)
1576 m->m_data += max_linkhdr;
1577 m->m_len = len;
1578 } else
1579 len = m->m_len;
1580 }
1581 if (copy)
1582 copy(cp, mtod(m, caddr_t), (unsigned)len);
1583 else
1584 bcopy(cp, mtod(m, caddr_t), (unsigned)len);
1585 cp += len;
1586 *mp = m;
1587 mp = &m->m_next;
1588 totlen -= len;
1589 if (cp == epkt)
1590 cp = buf;
1591 }
1592 return (top);
1593}
1594
1595/*
1596 * Cluster freelist allocation check. The mbuf lock must be held.
1597 * Ensure hysteresis between hi/lo.
1598 */
1599static int
1600m_howmany()
1601{
1602 register int i;
1603
1604 /* Under minimum */
1605 if (mbstat.m_clusters < MINCL)
1606 return (MINCL - mbstat.m_clusters);
1607 /* Too few (free < 1/2 total) and not over maximum */
1608 if (mbstat.m_clusters < nmbclusters &&
1609 (i = ((mbstat.m_clusters >> 1) - mbstat.m_clfree)) > 0)
1610 return i;
1611 return 0;
1612}
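/*
 * Example with illustrative numbers: nmbclusters = 1024, 512 clusters
 * currently allocated, 200 of them free.  512 >= MINCL and 512 < 1024,
 * so m_howmany() returns (512 >> 1) - 200 = 56, i.e. enough new
 * clusters to bring the free count back up to half the allocated total.
 */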
1613
1614
1615/*
1616 * Copy data from a buffer back into the indicated mbuf chain,
1617 * starting "off" bytes from the beginning, extending the mbuf
1618 * chain if necessary.
1619 */
1620void
1621m_copyback(m0, off, len, cp)
1622 struct mbuf *m0;
1623 register int off;
1624 register int len;
1625 caddr_t cp;
1626{
1627 register int mlen;
1628 register struct mbuf *m = m0, *n;
1629 int totlen = 0;
1630
1631 if (m0 == 0)
1632 return;
1633 while (off > (mlen = m->m_len)) {
1634 off -= mlen;
1635 totlen += mlen;
1636 if (m->m_next == 0) {
1637 n = m_getclr(M_DONTWAIT, m->m_type);
1638 if (n == 0)
1639 goto out;
1640 n->m_len = min(MLEN, len + off);
1641 m->m_next = n;
1642 }
1643 m = m->m_next;
1644 }
1645 while (len > 0) {
1646 mlen = min (m->m_len - off, len);
1647 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1648 cp += mlen;
1649 len -= mlen;
1650 mlen += off;
1651 off = 0;
1652 totlen += mlen;
1653 if (len == 0)
1654 break;
1655 if (m->m_next == 0) {
1656 n = m_get(M_DONTWAIT, m->m_type);
1657 if (n == 0)
1658 break;
1659 n->m_len = min(MLEN, len);
1660 m->m_next = n;
1661 }
1662 m = m->m_next;
1663 }
1664out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1665 m->m_pkthdr.len = totlen;
1666}
1667
1668
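/*
 * mcl_to_paddr() translates a virtual address inside the cluster pool
 * to the per-page bus/physical address recorded in mcl_paddr[] by
 * m_clalloc(); it returns 0 for addresses outside the pool or for
 * pages that have no mapping recorded yet.
 */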
1669char *mcl_to_paddr(register char *addr) {
1670 register int base_phys;
1671
1672 if (addr < (char *)mbutl || addr >= (char *)embutl)
1673 return (0);
1674 base_phys = mcl_paddr[(addr - (char *)mbutl) >> PAGE_SHIFT];
1675
1676 if (base_phys == 0)
1677 return (0);
1678 return ((char *)((int)base_phys | ((int)addr & PAGE_MASK)));
1679}
1680
1681/*
1682 * Dup the mbuf chain passed in. The whole thing. No cute additional cruft.
1683 * And really copy the thing. That way, we don't "precompute" checksums
1684 * for unsuspecting consumers.
1685 * Assumption: m->m_nextpkt == 0.
1686 * Trick: for small packets, don't dup into a cluster. That way received
1687 * packets don't take up too much room in the sockbuf (cf. sbspace()).
1688 */
1689int MDFail;
1690
1691struct mbuf *
1692m_dup(register struct mbuf *m, int how)
1693{ register struct mbuf *n, **np;
1694 struct mbuf *top;
1695 int copyhdr = 0;
1696
1697 np = &top;
1698 top = 0;
1699 if (m->m_flags & M_PKTHDR)
1700 copyhdr = 1;
1701
1702 /*
1703 * Quick check: if we have one mbuf and its data fits in an
1704 * mbuf with packet header, just copy and go.
1705 */
1706 if (m->m_next == NULL)
1707 { /* Then just move the data into an mbuf and be done... */
1708 if (copyhdr)
1709 { if (m->m_pkthdr.len <= MHLEN)
1710 { if ((n = m_gethdr(how, m->m_type)) == NULL)
1711 return(NULL);
1712 n->m_len = m->m_len;
1713 n->m_flags |= (m->m_flags & M_COPYFLAGS);
1714 n->m_pkthdr.len = m->m_pkthdr.len;
1715 n->m_pkthdr.rcvif = m->m_pkthdr.rcvif;
1716 n->m_pkthdr.header = NULL;
1717 n->m_pkthdr.csum_flags = 0;
1718 n->m_pkthdr.csum_data = 0;
1719			n->m_pkthdr.aux = NULL;
1720 n->m_pkthdr.reserved1 = 0;
1721 n->m_pkthdr.reserved2 = 0;
1722 bcopy(m->m_data, n->m_data, m->m_pkthdr.len);
1723 return(n);
1724 }
1725 } else if (m->m_len <= MLEN)
1726 { if ((n = m_get(how, m->m_type)) == NULL)
1727 return(NULL);
1728 bcopy(m->m_data, n->m_data, m->m_len);
1729 n->m_len = m->m_len;
1730 return(n);
1731 }
1732 }
1733 while (m)
1734 {
1735#if BLUE_DEBUG
1736 kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
1737 m->m_data);
1738#endif
1739 if (copyhdr)
1740 n = m_gethdr(how, m->m_type);
1741 else
1742 n = m_get(how, m->m_type);
1743 if (n == 0)
1744 goto nospace;
1745 if (m->m_flags & M_EXT)
1746 { MCLGET(n, how);
1747 if ((n->m_flags & M_EXT) == 0)
1748 goto nospace;
1749 }
1750 *np = n;
1751 if (copyhdr)
1752 { /* Don't use M_COPY_PKTHDR: preserve m_data */
1753 n->m_pkthdr = m->m_pkthdr;
1754 n->m_flags |= (m->m_flags & M_COPYFLAGS);
1755 copyhdr = 0;
1756 if ((n->m_flags & M_EXT) == 0)
1757 n->m_data = n->m_pktdat;
1758 }
1759 n->m_len = m->m_len;
1760 /*
1761 * Get the dup on the same bdry as the original
1762 * Assume that the two mbufs have the same offset to data area
1763 * (up to word bdries)
1764 */
1765 bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len);
1766 m = m->m_next;
1767 np = &n->m_next;
1768#if BLUE_DEBUG
1769 kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
1770 n->m_data);
1771#endif
1772 }
1773
1774 if (top == 0)
1775 MDFail++;
1776 return (top);
1777 nospace:
1778 m_freem(top);
1779 MDFail++;
1780 return (0);
1781}
1782
1783int
1784m_mclref(struct mbuf *p)
1785{
1786 return (_MCLREF(p));
1787}
1788
1789int
1790m_mclunref(struct mbuf *p)
1791{
1792 return (_MCLUNREF(p));
1793}
1794
1795/* change mbuf to new type */
1796void
1797m_mchtype(struct mbuf *m, int t)
1798{
1799 MBUF_LOCK();
1800 mbstat.m_mtypes[(m)->m_type]--;
1801 mbstat.m_mtypes[t]++;
1802 (m)->m_type = t;
1803 MBUF_UNLOCK();
1804}
1805
1806void *m_mtod(struct mbuf *m)
1807{
1808 return ((m)->m_data);
1809}
1810
1811struct mbuf *m_dtom(void *x)
1812{
1813 return ((struct mbuf *)((u_long)(x) & ~(MSIZE-1)));
1814}
1815
1816int m_mtocl(void *x)
1817{
1818 return (((char *)(x) - (char *)mbutl) / sizeof(union mcluster));
1819}
1820
1821union mcluster *m_cltom(int x)
1822{
1823 return ((union mcluster *)(mbutl + (x)));
1824}
1825
1826
1827void m_mcheck(struct mbuf *m)
1828{
1829 if (m->m_type != MT_FREE)
1830 panic("mget MCHECK: m_type=%x m=%x", m->m_type, m);
1831}
1832
1833#if 0
1834#include <sys/sysctl.h>
1835
1836static int mhog_num = 0;
1837static struct mbuf *mhog_chain = 0;
1838static int mhog_wait = 1;
1839
1840static int
1841sysctl_mhog_num SYSCTL_HANDLER_ARGS
1842{
1843 int old = mhog_num;
1844 int error;
1845
1846 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
1847 if (!error && req->newptr) {
1848 int i;
1849 struct mbuf *m;
1850
1851 if (mhog_chain) {
1852 m_freem(mhog_chain);
1853 mhog_chain = 0;
1854 }
1855
1856 for (i = 0; i < mhog_num; i++) {
1857 MGETHDR(m, mhog_wait ? M_WAIT : M_DONTWAIT, MT_DATA);
1858 if (m == 0)
1859 break;
1860
1861 MCLGET(m, mhog_wait ? M_WAIT : M_DONTWAIT);
1862 if ((m->m_flags & M_EXT) == 0) {
1863 m_free(m);
1864 m = 0;
1865 break;
1866 }
1867 m->m_next = mhog_chain;
1868 mhog_chain = m;
1869 }
1870 mhog_num = i;
1871 }
1872
1873 return error;
1874}
1875
1876SYSCTL_NODE(_kern_ipc, OID_AUTO, mhog, CTLFLAG_RW, 0, "mbuf hog");
1877
1878SYSCTL_PROC(_kern_ipc_mhog, OID_AUTO, cluster, CTLTYPE_INT|CTLFLAG_RW,
1879 &mhog_num, 0, &sysctl_mhog_num, "I", "");
1880SYSCTL_INT(_kern_ipc_mhog, OID_AUTO, wait, CTLFLAG_RW, &mhog_wait,
1881 0, "");
1882#endif
1883