/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
/* HISTORY
 *
 *	10/15/97 Annette DeSchon (deschon@apple.com)
 *	Fixed bug in which all cluster mbufs were broken up
 *	into regular mbufs: Some clusters are now reserved.
 *	When a cluster is needed, regular mbufs are no longer
 *	used.  (Radar 1683621)
 *	20-May-95 Mac Gillon (mgillon) at NeXT
 *	New version based on 4.4
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/domain.h>

#include <kern/queue.h>
#include <kern/kern_types.h>
#include <kern/sched_prim.h>

#include <IOKit/IOMapper.h>

extern vm_offset_t kmem_mb_alloc(vm_map_t, int);
extern boolean_t PE_parse_boot_arg(const char *, void *);

#define _MCLREF(p)	(++mclrefcnt[mtocl(p)])
#define _MCLUNREF(p)	(--mclrefcnt[mtocl(p)] == 0)
#define _M_CLEAR_PKTHDR(mbuf_ptr)	(mbuf_ptr)->m_pkthdr.rcvif = NULL; \
					(mbuf_ptr)->m_pkthdr.len = 0; \
					(mbuf_ptr)->m_pkthdr.header = NULL; \
					(mbuf_ptr)->m_pkthdr.csum_flags = 0; \
					(mbuf_ptr)->m_pkthdr.csum_data = 0; \
					(mbuf_ptr)->m_pkthdr.aux = (struct mbuf *)NULL; \
					(mbuf_ptr)->m_pkthdr.vlan_tag = 0; \
					(mbuf_ptr)->m_pkthdr.socket_id = 0; \
					SLIST_INIT(&(mbuf_ptr)->m_pkthdr.tags);
/* kernel translator */
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

lck_mtx_t * mbuf_mlock;
lck_grp_t * mbuf_mlock_grp;
lck_grp_attr_t * mbuf_mlock_grp_attr;
lck_attr_t * mbuf_mlock_attr;
extern lck_mtx_t *domain_proto_mtx;

struct mbuf *mfree;		/* mbuf free list */
struct mbuf *mfreelater;	/* mbuf deallocation list */
extern vm_map_t mb_map;		/* special map */
int m_want;			/* sleepers on mbufs */
short *mclrefcnt;		/* mapped cluster reference counts */
int *mcl_paddr;
static ppnum_t mcl_paddr_base;	/* Handle returned by IOMapper::iovmAlloc() */
union mcluster *mclfree;	/* mapped cluster free list */
union mbigcluster *mbigfree;	/* mapped big cluster free list */
int max_linkhdr;		/* largest link-level header */
int max_protohdr;		/* largest protocol header */
int max_hdr;			/* largest link+protocol header */
int max_datalen;		/* MHLEN - max_hdr */
struct mbstat mbstat;		/* statistics */
union mcluster *mbutl;		/* first mapped cluster address */
union mcluster *embutl;		/* ending virtual address of mclusters */

static int nclpp;		/* # clusters per physical page */

static int m_howmany(int, size_t);
void m_reclaim(void);
static int m_clalloc(const int, const int, const size_t, int);
int do_reclaim = 0;

#define MF_NOWAIT	0x1
#define MF_BIG		0x2

/* The number of cluster mbufs that are allocated, to start. */
#define MINCL	max(16, 2)

static int mbuf_expand_thread_wakeup = 0;
static int mbuf_expand_mcl = 0;
static int mbuf_expand_big = 0;
static int mbuf_expand_thread_initialized = 0;

static void mbuf_expand_thread_init(void);
static void mbuf_expand_thread(void);
static int m_expand(int);
static caddr_t m_bigalloc(int);
static void m_bigfree(caddr_t, u_int, caddr_t);
__private_extern__ struct mbuf * m_mbigget(struct mbuf *, int);
void mbinit(void);
static void m_range_check(void *addr);


#if 0
static int mfree_munge = 0;
#if 0
#define _MFREE_MUNGE(m) {					\
	if (mfree_munge)					\
	{	int i;						\
		vm_offset_t *element = (vm_offset_t *)(m);	\
		for (i = 0;					\
		     i < sizeof(struct mbuf)/sizeof(vm_offset_t); \
		     i++)					\
			(element)[i] = 0xdeadbeef;		\
	}							\
}
#else
void
munge_mbuf(struct mbuf *m)
{
	int i;
	vm_offset_t *element = (vm_offset_t *)(m);
	for (i = 0;
	     i < sizeof(struct mbuf)/sizeof(vm_offset_t);
	     i++)
		(element)[i] = 0xdeadbeef;
}
#define _MFREE_MUNGE(m) {		\
	if (mfree_munge)		\
		munge_mbuf(m);		\
}
#endif
#else
#define _MFREE_MUNGE(m)
#endif


#define _MINTGET(m, type) {					\
	MBUF_LOCK();						\
	if (((m) = mfree) != 0) {				\
		MCHECK(m);					\
		++mclrefcnt[mtocl(m)];				\
		mbstat.m_mtypes[MT_FREE]--;			\
		mbstat.m_mtypes[(type)]++;			\
		mfree = (m)->m_next;				\
	}							\
	MBUF_UNLOCK();						\
}

static void
m_range_check(void *addr)
{
	if (addr && (addr < (void *)mbutl || addr >= (void *)embutl))
		panic("mbuf address out of range 0x%x", addr);
}

__private_extern__ void
mbinit(void)
{
	int m;
	int initmcl = 32;
	int mcl_pages;

	if (nclpp)
		return;
	nclpp = round_page_32(MCLBYTES) / MCLBYTES;	/* see mbufgc() */
	if (nclpp < 1) nclpp = 1;
	mbuf_mlock_grp_attr = lck_grp_attr_alloc_init();

	mbuf_mlock_grp = lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr);
	mbuf_mlock_attr = lck_attr_alloc_init();

	mbuf_mlock = lck_mtx_alloc_init(mbuf_mlock_grp, mbuf_mlock_attr);

	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_bigmclbytes = NBPG;

	if (nmbclusters == 0)
		nmbclusters = NMBCLUSTERS;
	MALLOC(mclrefcnt, short *, nmbclusters * sizeof (short),
	       M_TEMP, M_WAITOK);
	if (mclrefcnt == 0)
		panic("mbinit");
	for (m = 0; m < nmbclusters; m++)
		mclrefcnt[m] = -1;

	/* Calculate the number of pages assigned to the cluster pool */
	mcl_pages = nmbclusters / (NBPG / CLBYTES);
	MALLOC(mcl_paddr, int *, mcl_pages * sizeof(int), M_TEMP, M_WAITOK);
	if (mcl_paddr == 0)
		panic("mbinit1");
	/* Register with the I/O Bus mapper */
	mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
	bzero((char *)mcl_paddr, mcl_pages * sizeof(int));

	embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES));

	PE_parse_boot_arg("initmcl", &initmcl);

	if (m_clalloc(max(NBPG/CLBYTES, 1) * initmcl, M_WAIT, MCLBYTES, 0) == 0)
		goto bad;
	MBUF_UNLOCK();

	(void) kernel_thread(kernel_task, mbuf_expand_thread_init);

	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Take the mbuf lock (if not already locked) and do not release it.
 */
/* ARGSUSED */
static int
m_clalloc(
	const int num,
	const int nowait,
	const size_t bufsize,
	int locked)
{
	int i;
	vm_size_t size = 0;
	int numpages = 0;
	vm_offset_t page = 0;

	if (locked == 0)
		MBUF_LOCK();
	/*
	 * Honor the caller's wish to block or not block.
	 * We have a way to grow the pool asynchronously,
	 * by kicking the dlil_input_thread.
	 */
	i = m_howmany(num, bufsize);
	if (i == 0 || nowait == M_DONTWAIT)
		goto out;

	MBUF_UNLOCK();
	size = round_page_32(i * bufsize);
	page = kmem_mb_alloc(mb_map, size);

	if (page == 0) {
		size = NBPG;		/* Try for 1 if failed */
		page = kmem_mb_alloc(mb_map, size);
	}
	MBUF_LOCK();

	if (page) {
		numpages = size / NBPG;
		for (i = 0; i < numpages; i++, page += NBPG) {
			if (((int)page & PGOFSET) == 0) {
				ppnum_t offset = ((char *)page - (char *)mbutl) / NBPG;
				ppnum_t new_page = pmap_find_phys(kernel_pmap, (vm_address_t)page);

				/*
				 * In the case of no mapper being available
				 * the following code nops and returns the
				 * input page; if there is a mapper, the
				 * appropriate I/O page is returned.
				 */
				new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
				mcl_paddr[offset] = new_page << 12;
			}
			if (bufsize == MCLBYTES) {
				union mcluster *mcl = (union mcluster *)page;

				if (++mclrefcnt[mtocl(mcl)] != 0)
					panic("m_clalloc already there");
				mcl->mcl_next = mclfree;
				mclfree = mcl++;
				if (++mclrefcnt[mtocl(mcl)] != 0)
					panic("m_clalloc already there");
				mcl->mcl_next = mclfree;
				mclfree = mcl++;
			} else {
				union mbigcluster *mbc = (union mbigcluster *)page;

				if (++mclrefcnt[mtocl(mbc)] != 0)
					panic("m_clalloc already there");
				if (++mclrefcnt[mtocl(mbc) + 1] != 0)
					panic("m_clalloc already there");

				mbc->mbc_next = mbigfree;
				mbigfree = mbc;
			}
		}
		if (bufsize == MCLBYTES) {
			int numcl = numpages << 1;
			mbstat.m_clfree += numcl;
			mbstat.m_clusters += numcl;
			return (numcl);
		} else {
			mbstat.m_bigclfree += numpages;
			mbstat.m_bigclusters += numpages;
			return (numpages);
		}
	} /* else ... */
out:
	/*
	 * When non-blocking we kick a thread if we have to grow the
	 * pool or if the number of free clusters is less than requested.
	 */
	if (bufsize == MCLBYTES) {
		if (i > 0) {
			/* Remember total number of clusters needed at this time */
			i += mbstat.m_clusters;
			if (i > mbuf_expand_mcl) {
				mbuf_expand_mcl = i;
				if (mbuf_expand_thread_initialized)
					wakeup((caddr_t)&mbuf_expand_thread_wakeup);
			}
		}

		if (mbstat.m_clfree >= num)
			return 1;
	} else {
		if (i > 0) {
			/* Remember total number of 4KB clusters needed at this time */
			i += mbstat.m_bigclusters;
			if (i > mbuf_expand_big) {
				mbuf_expand_big = i;
				if (mbuf_expand_thread_initialized)
					wakeup((caddr_t)&mbuf_expand_thread_wakeup);
			}
		}

		if (mbstat.m_bigclfree >= num)
			return 1;
	}
	return 0;
}
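
/*
 * Illustrative sketch (added note, not part of the original source):
 * callers of m_clalloc() must respect its locking contract -- when
 * "locked" is 0 it takes the mbuf lock itself and returns with the
 * lock still held, so the caller owes the matching MBUF_UNLOCK().
 * The call below mirrors how mbinit() seeds the 2KB cluster pool;
 * the function name and the count of 64 are arbitrary example values.
 */
#if 0
static void
example_seed_cluster_pool(void)
{
	if (m_clalloc(64, M_WAIT, MCLBYTES, 0) == 0)
		panic("example_seed_cluster_pool");
	MBUF_UNLOCK();		/* m_clalloc() left the mbuf lock held */
}
#endif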

/*
 * Add more free mbufs by cutting up a cluster.
 */
static int
m_expand(int canwait)
{
	caddr_t mcl;

	if (mbstat.m_clfree < (mbstat.m_clusters >> 4)) {
		/*
		 * 1/16th of the total number of cluster mbufs allocated is
		 * reserved for large packets.  The number reserved must
		 * always be < 1/2, or future allocation will be prevented.
		 */
		(void)m_clalloc(1, canwait, MCLBYTES, 0);
		MBUF_UNLOCK();
		if (mbstat.m_clfree < (mbstat.m_clusters >> 4))
			return 0;
	}

	MCLALLOC(mcl, canwait);
	if (mcl) {
		struct mbuf *m = (struct mbuf *)mcl;
		int i = NMBPCL;
		MBUF_LOCK();
		mbstat.m_mtypes[MT_FREE] += i;
		mbstat.m_mbufs += i;
		while (i--) {
			_MFREE_MUNGE(m);
			m->m_type = MT_FREE;
			m->m_next = mfree;
			mfree = m++;
		}
		i = m_want;
		m_want = 0;
		MBUF_UNLOCK();
		if (i) wakeup((caddr_t)&mfree);
		return 1;
	}
	return 0;
}
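
/*
 * Worked example (added note, not in the original source): with
 * mbstat.m_clusters == 4096, the reservation test in m_expand() keeps
 * 4096 >> 4 == 256 clusters in reserve; once the free count drops to
 * that floor, no more whole clusters are carved up into small mbufs,
 * so large packets can still get cluster-backed storage.
 */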

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(
	int canwait,
	int type)
{
	struct mbuf *m;
	int wait;

	for (;;) {
		(void) m_expand(canwait);
		_MINTGET(m, type);
		if (m) {
			(m)->m_next = (m)->m_nextpkt = 0;
			(m)->m_type = (type);
			(m)->m_data = (m)->m_dat;
			(m)->m_flags = 0;
			(m)->m_len = 0;
		}
		if (m || canwait == M_DONTWAIT)
			break;
		MBUF_LOCK();
		wait = m_want++;
		mbuf_expand_mcl++;
		if (wait == 0)
			mbstat.m_drain++;
		else
			mbstat.m_wait++;
		MBUF_UNLOCK();

		if (mbuf_expand_thread_initialized)
			wakeup((caddr_t)&mbuf_expand_thread_wakeup);

		if (wait == 0) {
			m_reclaim();
		} else {
			struct timespec ts;
			ts.tv_sec = 1;
			ts.tv_nsec = 0;
			(void) msleep((caddr_t)&mfree, 0, (PZERO-1) | PDROP, "m_retry", &ts);
		}
	}
	if (m == 0)
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(
	int canwait,
	int type)
{
	struct mbuf *m;

	if ((m = m_retry(canwait, type))) {
		m->m_next = m->m_nextpkt = 0;
		m->m_flags |= M_PKTHDR;
		m->m_data = m->m_pktdat;
		_M_CLEAR_PKTHDR(m);
	}
	return (m);
}

void
m_reclaim(void)
{
	do_reclaim = 1;	/* drain is performed in pfslowtimo(), to avoid deadlocks */
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(
	int nowait,
	int type)
{
	struct mbuf *m;

	m_range_check(mfree);
	m_range_check(mclfree);
	m_range_check(mbigfree);

	_MINTGET(m, type);
	if (m) {
		m->m_next = m->m_nextpkt = 0;
		m->m_type = type;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		m->m_len = 0;
	} else
		(m) = m_retry(nowait, type);

	m_range_check(mfree);
	m_range_check(mclfree);
	m_range_check(mbigfree);


	return (m);
}

struct mbuf *
m_gethdr(
	int nowait,
	int type)
{
	struct mbuf *m;

	m_range_check(mfree);
	m_range_check(mclfree);
	m_range_check(mbigfree);


	_MINTGET(m, type);
	if (m) {
		m->m_next = m->m_nextpkt = 0;
		m->m_type = type;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_len = 0;
		_M_CLEAR_PKTHDR(m)
	} else
		m = m_retryhdr(nowait, type);

	m_range_check(mfree);
	m_range_check(mclfree);
	m_range_check(mbigfree);


	return m;
}

struct mbuf *
m_getclr(
	int nowait,
	int type)
{
	struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}
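
/*
 * Illustrative sketch (not part of the original source): typical use of
 * the allocation routines above. A caller grabs a packet-header mbuf,
 * initializes its lengths, and releases it with m_freem(). The function
 * name is hypothetical.
 */
#if 0
static void
example_alloc_and_free(void)
{
	struct mbuf *m;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return;		/* pool exhausted and we chose not to wait */
	m->m_len = 0;
	m->m_pkthdr.len = 0;
	m_freem(m);
}
#endif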

struct mbuf *
m_free(
	struct mbuf *m)
{
	struct mbuf *n = m->m_next;
	int i;

	m_range_check(m);
	m_range_check(mfree);
	m_range_check(mclfree);

	if (m->m_type == MT_FREE)
		panic("freeing free mbuf");

	/* Free the aux data if there is any */
	if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux)
	{
		m_freem(m->m_pkthdr.aux);
	}
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	MBUF_LOCK();
	if ((m->m_flags & M_EXT))
	{
		if (MCLHASREFERENCE(m)) {
			remque((queue_t)&m->m_ext.ext_refs);
		} else if (m->m_ext.ext_free == NULL) {
			union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;

			m_range_check(mcl);

			if (_MCLUNREF(mcl)) {
				mcl->mcl_next = mclfree;
				mclfree = mcl;
				++mbstat.m_clfree;
			}
#ifdef COMMENT_OUT
/* *** Since m_split() increments "mclrefcnt[mtocl(m->m_ext.ext_buf)]",
       and AppleTalk ADSP uses m_split(), this incorrect sanity check
       caused a panic.
*** */
			else	/* sanity check - not referenced this way */
				panic("m_free m_ext cluster not free");
#endif
		} else {
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_size, m->m_ext.ext_arg);
		}
	}
	mbstat.m_mtypes[m->m_type]--;
	(void) _MCLUNREF(m);
	_MFREE_MUNGE(m);
	m->m_type = MT_FREE;
	mbstat.m_mtypes[m->m_type]++;
	m->m_flags = 0;
	m->m_next = mfree;
	m->m_len = 0;
	mfree = m;
	i = m_want;
	m_want = 0;
	MBUF_UNLOCK();
	if (i) wakeup((caddr_t)&mfree);
	return (n);
}

/* m_mclget() adds an mbuf cluster to a normal mbuf */
struct mbuf *
m_mclget(
	struct mbuf *m,
	int nowait)
{
	MCLALLOC(m->m_ext.ext_buf, nowait);
	if (m->m_ext.ext_buf) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_size = MCLBYTES;
		m->m_ext.ext_free = 0;
		m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
		    &m->m_ext.ext_refs;
	}

	return m;
}

/* m_mclalloc() allocates an mbuf cluster */
caddr_t
m_mclalloc(
	int nowait)
{
	caddr_t p;

	(void)m_clalloc(1, nowait, MCLBYTES, 0);
	if ((p = (caddr_t)mclfree)) {
		++mclrefcnt[mtocl(p)];
		mbstat.m_clfree--;
		mclfree = ((union mcluster *)p)->mcl_next;
	} else {
		mbstat.m_drops++;
	}
	MBUF_UNLOCK();

	return p;
}

/* m_mclfree() releases a reference to a cluster allocated by MCLALLOC,
 * freeing the cluster if the reference count has reached 0. */
void
m_mclfree(
	caddr_t p)
{
	MBUF_LOCK();

	m_range_check(p);

	if (--mclrefcnt[mtocl(p)] == 0) {
		((union mcluster *)(p))->mcl_next = mclfree;
		mclfree = (union mcluster *)(p);
		mbstat.m_clfree++;
	}
	MBUF_UNLOCK();
}

/* m_mclhasreference() checks if a cluster of an mbuf is referenced by another mbuf */
int
m_mclhasreference(
	struct mbuf *m)
{
	return (m->m_ext.ext_refs.forward != &(m->m_ext.ext_refs));
}

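/*
 * Illustrative sketch (not part of the original source): attaching a
 * 2KB cluster to a freshly allocated mbuf. m_mclget() sets M_EXT only
 * on success, so testing that flag is the conventional success check.
 * The function name is hypothetical.
 */
#if 0
static struct mbuf *
example_get_cluster_mbuf(void)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m = m_mclget(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_free(m);	/* no cluster was available */
		return (NULL);
	}
	return (m);		/* m_data now points at MCLBYTES of space */
}
#endif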
__private_extern__ caddr_t
m_bigalloc(int nowait)
{
	caddr_t p;

	(void)m_clalloc(1, nowait, NBPG, 0);
	if ((p = (caddr_t)mbigfree)) {
		if (mclrefcnt[mtocl(p)] != mclrefcnt[mtocl(p) + 1])
			panic("m_bigalloc mclrefcnt %x mismatch %d != %d",
			    p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
		if (mclrefcnt[mtocl(p)] || mclrefcnt[mtocl(p) + 1])
			panic("m_bigalloc mclrefcnt %x not null %d != %d",
			    p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
		++mclrefcnt[mtocl(p)];
		++mclrefcnt[mtocl(p) + 1];
		mbstat.m_bigclfree--;
		mbigfree = ((union mbigcluster *)p)->mbc_next;
	} else {
		mbstat.m_drops++;
	}
	MBUF_UNLOCK();
	return p;
}

__private_extern__ void
m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
{
	m_range_check(p);

	if (mclrefcnt[mtocl(p)] != mclrefcnt[mtocl(p) + 1])
		panic("m_bigfree mclrefcnt %x mismatch %d != %d",
		    p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
	--mclrefcnt[mtocl(p)];
	--mclrefcnt[mtocl(p) + 1];
	if (mclrefcnt[mtocl(p)] == 0) {
		((union mbigcluster *)(p))->mbc_next = mbigfree;
		mbigfree = (union mbigcluster *)(p);
		mbstat.m_bigclfree++;
	}
}

/* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
__private_extern__ struct mbuf *
m_mbigget(struct mbuf *m, int nowait)
{
	m->m_ext.ext_buf = m_bigalloc(nowait);
	if (m->m_ext.ext_buf) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_size = NBPG;
		m->m_ext.ext_free = m_bigfree;
		m->m_ext.ext_arg = 0;
		m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
		    &m->m_ext.ext_refs;
	}

	return m;
}


/* */
void
m_copy_pkthdr(
	struct mbuf *to,
	struct mbuf *from)
{
	to->m_pkthdr = from->m_pkthdr;
	from->m_pkthdr.aux = (struct mbuf *)NULL;
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = (to)->m_pktdat;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
#ifndef __APPLE__
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));

	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}
#endif

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
static int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	if (to->m_pkthdr.aux != NULL)
		m_freem(to->m_pkthdr.aux);
	to->m_pkthdr = from->m_pkthdr;
	to->m_pkthdr.aux = NULL;
	(void) m_aux_copy(to, from);
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Return a list of mbuf hdrs that point to clusters.
 * Try for num_needed; if wantall is not set, return whatever
 * number were available.  Set up the first num_with_pkthdrs
 * with mbuf hdrs configured as packet headers; these are
 * chained on the m_nextpkt field.  Any packets requested beyond
 * this are chained onto the last packet header's m_next field.
 * The size of the cluster is controlled by the parameter bufsize.
 */
__private_extern__ struct mbuf *
m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, int how, int wantall, size_t bufsize)
{
	struct mbuf *m;
	struct mbuf **np, *top;
	unsigned int num, needed = *num_needed;

	if (bufsize != MCLBYTES && bufsize != NBPG)
		return 0;

	top = NULL;
	np = &top;

	(void)m_clalloc(needed, how, bufsize, 0);	/* takes the MBUF_LOCK, but doesn't release it... */

	for (num = 0; num < needed; num++) {
		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		if (mfree && ((bufsize == NBPG && mbigfree) || (bufsize == MCLBYTES && mclfree))) {
			/* mbuf + cluster are available */
			m = mfree;
			MCHECK(m);
			mfree = m->m_next;
			++mclrefcnt[mtocl(m)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[MT_DATA]++;
			if (bufsize == NBPG) {
				m->m_ext.ext_buf = (caddr_t)mbigfree;	/* get the big cluster */
				++mclrefcnt[mtocl(m->m_ext.ext_buf)];
				++mclrefcnt[mtocl(m->m_ext.ext_buf) + 1];
				mbstat.m_bigclfree--;
				mbigfree = ((union mbigcluster *)(m->m_ext.ext_buf))->mbc_next;
				m->m_ext.ext_free = m_bigfree;
				m->m_ext.ext_size = NBPG;
			} else {
				m->m_ext.ext_buf = (caddr_t)mclfree;	/* get the cluster */
				++mclrefcnt[mtocl(m->m_ext.ext_buf)];
				mbstat.m_clfree--;
				mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
				m->m_ext.ext_free = 0;
				m->m_ext.ext_size = MCLBYTES;
			}
			m->m_ext.ext_arg = 0;
			m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
			m->m_next = m->m_nextpkt = 0;
			m->m_type = MT_DATA;
			m->m_data = m->m_ext.ext_buf;
			m->m_len = 0;

			if (num_with_pkthdrs == 0)
				m->m_flags = M_EXT;
			else {
				m->m_flags = M_PKTHDR | M_EXT;
				_M_CLEAR_PKTHDR(m);

				num_with_pkthdrs--;
			}
		} else {
			MBUF_UNLOCK();

			if (num_with_pkthdrs == 0) {
				MGET(m, how, MT_DATA);
			} else {
				MGETHDR(m, how, MT_DATA);

				num_with_pkthdrs--;
			}
			if (m == 0)
				goto fail;

			if (bufsize == NBPG)
				m = m_mbigget(m, how);
			else
				m = m_mclget(m, how);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				goto fail;
			}
			MBUF_LOCK();
		}
		*np = m;

		if (num_with_pkthdrs)
			np = &m->m_nextpkt;
		else
			np = &m->m_next;
	}
	MBUF_UNLOCK();

	*num_needed = num;
	return (top);
fail:
	if (wantall && top) {
		m_freem(top);
		return 0;
	}
	return top;
}


/*
 * Return a list of mbufs linked by m_nextpkt.
 * Try for num_needed, and if wantall is not set, return whatever
 * number were available.
 * The size of each mbuf in the list is controlled by the parameter packetlen.
 * Each mbuf of the list may have a chain of mbufs linked by m_next.  Each
 * mbuf in the chain is called a segment.
 * If maxsegments is not null and the value pointed to is not null, this
 * specifies the maximum number of segments for a chain of mbufs.
 * If maxsegments is zero or the value pointed to is zero, the
 * caller does not have any restriction on the number of segments.
 * The actual number of segments of an mbuf chain is returned in the value
 * pointed to by maxsegments.
 * When possible the allocation is done under a single lock.
 */

__private_extern__ struct mbuf *
m_allocpacket_internal(unsigned int *num_needed, size_t packetlen, unsigned int * maxsegments,
		       int how, int wantall, size_t wantsize)
{
	struct mbuf **np, *top;
	size_t bufsize;
	unsigned int num;
	unsigned int numchunks = 0;

	top = NULL;
	np = &top;

	if (wantsize == 0) {
		if (packetlen <= MINCLSIZE)
			bufsize = packetlen;
		else if (packetlen > MCLBYTES)
			bufsize = NBPG;
		else
			bufsize = MCLBYTES;
	} else if (wantsize == MCLBYTES || wantsize == NBPG)
		bufsize = wantsize;
	else
		return 0;

	if (bufsize <= MHLEN) {
		numchunks = 1;
	} else if (bufsize <= MINCLSIZE) {
		if (maxsegments != NULL && *maxsegments == 1) {
			bufsize = MCLBYTES;
			numchunks = 1;
		} else {
			numchunks = 2;
		}
	} else if (bufsize == NBPG) {
		numchunks = ((packetlen - 1) >> PGSHIFT) + 1;
	} else {
		numchunks = ((packetlen - 1) >> MCLSHIFT) + 1;
	}
	if (maxsegments != NULL) {
		if (*maxsegments && numchunks > *maxsegments) {
			*maxsegments = numchunks;
			return 0;
		}
		*maxsegments = numchunks;
	}
	/* m_clalloc takes the MBUF_LOCK, but does not release it */
	(void)m_clalloc(numchunks, how, (bufsize == NBPG) ? NBPG : MCLBYTES, 0);
	for (num = 0; num < *num_needed; num++) {
		struct mbuf **nm, *pkt = 0;
		size_t len;

		nm = &pkt;

		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		for (len = 0; len < packetlen; ) {
			struct mbuf *m = NULL;

			if (wantsize == 0 && packetlen > MINCLSIZE) {
				if (packetlen - len > MCLBYTES)
					bufsize = NBPG;
				else
					bufsize = MCLBYTES;
			}
			len += bufsize;

			if (mfree && ((bufsize == NBPG && mbigfree) || (bufsize == MCLBYTES && mclfree))) {
				/* mbuf + cluster are available */
				m = mfree;
				MCHECK(m);
				mfree = m->m_next;
				++mclrefcnt[mtocl(m)];
				mbstat.m_mtypes[MT_FREE]--;
				mbstat.m_mtypes[MT_DATA]++;
				if (bufsize == NBPG) {
					m->m_ext.ext_buf = (caddr_t)mbigfree;	/* get the big cluster */
					++mclrefcnt[mtocl(m->m_ext.ext_buf)];
					++mclrefcnt[mtocl(m->m_ext.ext_buf) + 1];
					mbstat.m_bigclfree--;
					mbigfree = ((union mbigcluster *)(m->m_ext.ext_buf))->mbc_next;
					m->m_ext.ext_free = m_bigfree;
					m->m_ext.ext_size = NBPG;
				} else {
					m->m_ext.ext_buf = (caddr_t)mclfree;	/* get the cluster */
					++mclrefcnt[mtocl(m->m_ext.ext_buf)];
					mbstat.m_clfree--;
					mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
					m->m_ext.ext_free = 0;
					m->m_ext.ext_size = MCLBYTES;
				}
				m->m_ext.ext_arg = 0;
				m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
				m->m_next = m->m_nextpkt = 0;
				m->m_type = MT_DATA;
				m->m_data = m->m_ext.ext_buf;
				m->m_len = 0;

				if (pkt == 0) {
					pkt = m;
					m->m_flags = M_PKTHDR | M_EXT;
					_M_CLEAR_PKTHDR(m);
				} else {
					m->m_flags = M_EXT;
				}
			} else {
				MBUF_UNLOCK();

				if (pkt == 0) {
					MGETHDR(m, how, MT_DATA);
				} else {
					MGET(m, how, MT_DATA);
				}
				if (m == 0) {
					m_freem(pkt);
					goto fail;
				}
				if (bufsize <= MINCLSIZE) {
					if (bufsize > MHLEN) {
						MGET(m->m_next, how, MT_DATA);
						if (m->m_next == 0) {
							m_free(m);
							m_freem(pkt);
							goto fail;
						}
					}
				} else {
					if (bufsize == NBPG)
						m = m_mbigget(m, how);
					else
						m = m_mclget(m, how);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						m_freem(pkt);
						goto fail;
					}
				}
				MBUF_LOCK();
			}
			*nm = m;
			nm = &m->m_next;
		}
		*np = pkt;
		np = &pkt->m_nextpkt;
	}
	MBUF_UNLOCK();
	*num_needed = num;

	return top;
fail:
	if (wantall && top) {
		m_freem(top);
		return 0;
	}
	*num_needed = num;

	return top;
}
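
/*
 * Illustrative sketch (not part of the original source): requesting one
 * 6000-byte packet with at most 4 segments. The function name and all
 * numbers are arbitrary example values; on return, maxsegments holds
 * the number of segments actually used per chain.
 */
#if 0
static struct mbuf *
example_alloc_packet(void)
{
	unsigned int num = 1;
	unsigned int maxsegments = 4;

	return m_allocpacket_internal(&num, 6000, &maxsegments,
	    M_WAITOK, 1, 0);
}
#endif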


/* Best effort to get a mbuf cluster + pkthdr under one lock.
 * If we don't have them avail, just bail out and use the regular
 * path.
 * Used by drivers to allocate packets on receive ring.
 */
__private_extern__ struct mbuf *
m_getpacket_how(int how)
{
	unsigned int num_needed = 1;

	return m_getpackets_internal(&num_needed, 1, how, 1, MCLBYTES);
}

/* Best effort to get a mbuf cluster + pkthdr under one lock.
 * If we don't have them avail, just bail out and use the regular
 * path.
 * Used by drivers to allocate packets on receive ring.
 */
struct mbuf *
m_getpacket(void)
{
	unsigned int num_needed = 1;

	return m_getpackets_internal(&num_needed, 1, M_WAITOK, 1, MCLBYTES);
}


/*
 * Return a list of mbuf hdrs that point to clusters.
 * Try for num_needed; if this can't be met, return whatever
 * number were available.  Set up the first num_with_pkthdrs
 * with mbuf hdrs configured as packet headers; these are
 * chained on the m_nextpkt field.  Any packets requested beyond
 * this are chained onto the last packet header's m_next field.
 */
struct mbuf *
m_getpackets(int num_needed, int num_with_pkthdrs, int how)
{
	unsigned int n = num_needed;

	return m_getpackets_internal(&n, num_with_pkthdrs, how, 0, MCLBYTES);
}

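/*
 * Illustrative sketch (not part of the original source): a driver
 * refilling a receive ring with m_getpackets(). The function name and
 * ring size of 32 are arbitrary example values; because wantall is not
 * set internally, fewer packets than requested may come back, so the
 * loop walks whatever was returned.
 */
#if 0
static void
example_fill_rx_ring(void)
{
	struct mbuf *list, *m;

	list = m_getpackets(32, 32, M_DONTWAIT);
	for (m = list; m != NULL; m = m->m_nextpkt) {
		/* hand each cluster-backed packet to the hardware ring */
	}
}
#endif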
/*
 * return a list of mbuf hdrs set up as packet hdrs
 * chained together on the m_nextpkt field
 */
struct mbuf *
m_getpackethdrs(int num_needed, int how)
{
	struct mbuf *m;
	struct mbuf **np, *top;

	top = NULL;
	np = &top;

	MBUF_LOCK();

	while (num_needed--) {
		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		if ((m = mfree)) {	/* mbufs are available */
			MCHECK(m);
			mfree = m->m_next;
			++mclrefcnt[mtocl(m)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[MT_DATA]++;

			m->m_next = m->m_nextpkt = 0;
			m->m_type = MT_DATA;
			m->m_flags = M_PKTHDR;
			m->m_len = 0;
			m->m_data = m->m_pktdat;
			_M_CLEAR_PKTHDR(m);

		} else {

			MBUF_UNLOCK();
			m = m_retryhdr(how, MT_DATA);
			if (m == 0)
				return(top);
			MBUF_LOCK();
		}
		*np = m;
		np = &m->m_nextpkt;
	}
	MBUF_UNLOCK();

	return (top);
}


/* Free an mbuf list (m_nextpkt) while following m_next under one lock.
 * Returns the count of packets freed.  Used by the drivers.
 */
int
m_freem_list(
	struct mbuf *m)
{
	struct mbuf *nextpkt;
	int i, count = 0;

	MBUF_LOCK();

	while (m) {
		if (m)
			nextpkt = m->m_nextpkt;	/* chain of linked mbufs from driver */
		else
			nextpkt = 0;

		count++;

		while (m) {	/* free the mbuf chain (like m_freem) */

			struct mbuf *n;

			m_range_check(m);
			m_range_check(mfree);
			m_range_check(mclfree);
			m_range_check(mbigfree);


			/* Free the aux data if there is any */
			if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) {
				/*
				 * Treat the current m as the nextpkt and set m
				 * to the aux data.  Preserve nextpkt in m->m_nextpkt.
				 * This lets us free the aux data in this loop
				 * without having to call m_freem recursively,
				 * which wouldn't work because we've still got
				 * the lock.
				 */
				m->m_nextpkt = nextpkt;
				nextpkt = m;
				m = nextpkt->m_pkthdr.aux;
				nextpkt->m_pkthdr.aux = NULL;
			}

			if ((m->m_flags & M_PKTHDR) != 0 && !SLIST_EMPTY(&m->m_pkthdr.tags)) {
				/* A quick (albeit inefficient) expedient */
				MBUF_UNLOCK();
				m_tag_delete_chain(m, NULL);
				MBUF_LOCK();
			}

			n = m->m_next;

			if (n && n->m_nextpkt)
				panic("m_freem_list: m_nextpkt of m_next != NULL");
			if (m->m_type == MT_FREE)
				panic("freeing free mbuf");

			if (m->m_flags & M_EXT) {
				if (MCLHASREFERENCE(m)) {
					remque((queue_t)&m->m_ext.ext_refs);
				} else if (m->m_ext.ext_free == NULL) {
					union mcluster *mcl = (union mcluster *)m->m_ext.ext_buf;

					m_range_check(mcl);

					if (_MCLUNREF(mcl)) {
						mcl->mcl_next = mclfree;
						mclfree = mcl;
						++mbstat.m_clfree;
					}
				} else {
					(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
					    m->m_ext.ext_size, m->m_ext.ext_arg);
				}
			}
			mbstat.m_mtypes[m->m_type]--;
			(void) _MCLUNREF(m);
			_MFREE_MUNGE(m);
			mbstat.m_mtypes[MT_FREE]++;
			m->m_type = MT_FREE;
			m->m_flags = 0;
			m->m_len = 0;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
		m = nextpkt;	/* bump m with saved nextpkt if any */
	}
	if ((i = m_want))
		m_want = 0;

	MBUF_UNLOCK();

	if (i)
		wakeup((caddr_t)&mfree);

	return (count);
}
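
/*
 * Illustrative sketch (not part of the original source): a driver
 * completing a batch of transmitted packets frees the whole m_nextpkt
 * chain with a single lock round trip instead of per-packet m_freem()
 * calls. The function name is hypothetical.
 */
#if 0
static void
example_tx_complete(struct mbuf *done_list)
{
	int freed;

	freed = m_freem_list(done_list);	/* one lock, whole batch */
	(void)freed;				/* count of packets released */
}
#endif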

void
m_freem(
	struct mbuf *m)
{
	while (m)
		m = m_free(m);
}

/*
 * Mbuffer utility routines.
 */
/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 */
int
m_leadingspace(
	struct mbuf *m)
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return(0);
		return (m->m_data - m->m_ext.ext_buf);
	}
	if (m->m_flags & M_PKTHDR)
		return (m->m_data - m->m_pktdat);
	return (m->m_data - m->m_dat);
}

/*
 * Compute the amount of space available
 * after the end of data in an mbuf.
 */
int
m_trailingspace(
	struct mbuf *m)
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return(0);
		return (m->m_ext.ext_buf + m->m_ext.ext_size -
		    (m->m_data + m->m_len));
	}
	return (&m->m_dat[MLEN] - (m->m_data + m->m_len));
}

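/*
 * Illustrative sketch (not part of the original source): appending data
 * only when it fits in the current mbuf. Note both routines above
 * report 0 for a shared cluster (MCLHASREFERENCE), so writers never
 * scribble on data another mbuf can still see. The function name is
 * hypothetical.
 */
#if 0
static int
example_append(struct mbuf *m, caddr_t src, int len)
{
	if (m_trailingspace(m) < len)
		return (0);	/* caller must grow the chain instead */
	bcopy(src, mtod(m, caddr_t) + m->m_len, (unsigned)len);
	m->m_len += len;
	return (1);
}
#endif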
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 * Does not adjust packet header length.
 */
struct mbuf *
m_prepend(
	struct mbuf *m,
	int len,
	int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Replacement for old M_PREPEND macro:
 * allocate new mbuf to prepend to chain,
 * copy junk along, and adjust length.
 */
struct mbuf *
m_prepend_2(
	struct mbuf *m,
	int len,
	int how)
{
	if (M_LEADINGSPACE(m) >= len) {
		m->m_data -= len;
		m->m_len += len;
	} else {
		m = m_prepend(m, len, how);
	}
	if ((m) && (m->m_flags & M_PKTHDR))
		m->m_pkthdr.len += len;
	return (m);
}

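/*
 * Illustrative sketch (not part of the original source): reserving room
 * for a hypothetical 14-byte link-layer header in front of a packet.
 * m_prepend_2() reuses leading space when it can and otherwise falls
 * back to allocating a fresh mbuf; on failure the chain has already
 * been freed by m_prepend(). The function name is hypothetical.
 */
#if 0
static struct mbuf *
example_add_link_header(struct mbuf *m)
{
	m = m_prepend_2(m, 14, M_DONTWAIT);
	if (m == NULL)
		return (NULL);	/* chain was freed on the failure path */
	/* mtod(m, caddr_t) now points at 14 writable header bytes */
	return (m);
}
#endif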
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(
	struct mbuf *m,
	int off0,
	int len,
	int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;

	while (off >= m->m_len) {
		if (m == 0)
			panic("m_copym");
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;

	MBUF_LOCK();

	while (len > 0) {
		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		if ((n = mfree)) {
			MCHECK(n);
			++mclrefcnt[mtocl(n)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[m->m_type]++;
			mfree = n->m_next;
			n->m_next = n->m_nextpkt = 0;
			n->m_type = m->m_type;
			n->m_data = n->m_dat;
			n->m_flags = 0;
		} else {
			MBUF_UNLOCK();
			n = m_retry(wait, m->m_type);
			MBUF_LOCK();
		}
		*np = n;

		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		if (len == M_COPYALL) {
			if (min(len, (m->m_len - off)) == len) {
				printf("m->m_len %d - off %d = %d, %d\n",
				    m->m_len, off, m->m_len - off,
				    min(len, (m->m_len - off)));
			}
		}
		n->m_len = min(len, (m->m_len - off));
		if (n->m_len == M_COPYALL) {
			printf("n->m_len == M_COPYALL, fixing\n");
			n->m_len = MHLEN;
		}
		if (m->m_flags & M_EXT) {
			n->m_ext = m->m_ext;
			insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
			n->m_data = m->m_data + off;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	MBUF_UNLOCK();

	if (top == 0)
		MCFail++;

	return (top);
nospace:
	MBUF_UNLOCK();

	m_freem(top);
	MCFail++;
	return (0);
}
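
/*
 * Illustrative sketch (not part of the original source): taking a
 * copy of an entire packet, e.g. before handing one instance to a tap.
 * Cluster-backed data is shared rather than copied (the ext_refs queue
 * tracks the extra reference), so the operation is cheap, but the
 * shared clusters must be treated as read-only while both chains are
 * alive. The function name is hypothetical.
 */
#if 0
static struct mbuf *
example_clone_packet(struct mbuf *m)
{
	return m_copym(m, 0, M_COPYALL, M_DONTWAIT);	/* NULL if pool is empty */
}
#endif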


/*
 * Equivalent to m_copym except that all necessary
 * mbuf hdrs are allocated within this routine.
 * Also, the last mbuf and offset accessed are passed
 * out and can be passed back in to avoid having to
 * rescan the entire mbuf list (normally hung off of the socket)
 */
struct mbuf *
m_copym_with_hdrs(
	struct mbuf *m,
	int off0,
	int len,
	int wait,
	struct mbuf **m_last,
	int *m_off)
{
	struct mbuf *n, **np = 0;
	int off = off0;
	struct mbuf *top = 0;
	int copyhdr = 0;
	int type;

	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;

	if (*m_last) {
		m = *m_last;
		off = *m_off;
	} else {
		while (off >= m->m_len) {
			off -= m->m_len;
			m = m->m_next;
		}
	}

	MBUF_LOCK();

	while (len > 0) {
		m_range_check(mfree);
		m_range_check(mclfree);
		m_range_check(mbigfree);

		if (top == 0)
			type = MT_HEADER;
		else {
			if (m == 0)
				panic("m_gethdr_and_copym");
			type = m->m_type;
		}
		if ((n = mfree)) {
			MCHECK(n);
			++mclrefcnt[mtocl(n)];
			mbstat.m_mtypes[MT_FREE]--;
			mbstat.m_mtypes[type]++;
			mfree = n->m_next;
			n->m_next = n->m_nextpkt = 0;
			n->m_type = type;

			if (top) {
				n->m_data = n->m_dat;
				n->m_flags = 0;
			} else {
				n->m_data = n->m_pktdat;
				n->m_flags = M_PKTHDR;
				_M_CLEAR_PKTHDR(n);
			}
		} else {
			MBUF_UNLOCK();
			if (top)
				n = m_retry(wait, type);
			else
				n = m_retryhdr(wait, type);
			MBUF_LOCK();
		}
		if (n == 0)
			goto nospace;
		if (top == 0) {
			top = n;
			np = &top->m_next;
			continue;
		} else
			*np = n;

		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, (m->m_len - off));

		if (m->m_flags & M_EXT) {
			n->m_ext = m->m_ext;
			insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
			n->m_data = m->m_data + off;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;

		if (len == 0) {
			if ((off + n->m_len) == m->m_len) {
				*m_last = m->m_next;
				*m_off = 0;
			} else {
				*m_last = m;
				*m_off = off + n->m_len;
			}
			break;
		}
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	MBUF_UNLOCK();

	return (top);
nospace:
	MBUF_UNLOCK();

	if (top)
		m_freem(top);
	MCFail++;
	return (0);
}
1659 | ||
1660 | ||
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void m_copydata(
	struct mbuf *m,
	int off,
	int len,
	caddr_t cp)
{
	unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

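/*
 * Illustrative sketch, not part of the original file: a caller that
 * needs a contiguous view of the leading bytes of a chain can flatten
 * them into local storage with m_copydata().  The function name and
 * the 32-byte size are hypothetical; the chain is assumed to hold at
 * least that many bytes, since m_copydata() panics on a short chain.
 */
#if 0	/* example only, not compiled */
static void
example_copydata(struct mbuf *m)
{
	char hdrbuf[32];

	/* The 32 bytes may span several mbufs; m_copydata() walks them. */
	m_copydata(m, 0, sizeof (hdrbuf), (caddr_t)hdrbuf);
}
#endif
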
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void m_cat(
	struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

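/*
 * Illustrative sketch, not part of the original file: because m_cat()
 * never updates m_pkthdr (see the comment above) and may coalesce and
 * free mbufs from "frag", a caller must capture the fragment length
 * before the call and fix up the header itself.  The function and its
 * "fraglen" parameter are hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_cat(struct mbuf *pkt, struct mbuf *frag, int fraglen)
{
	m_cat(pkt, frag);			/* frag may be freed here */
	if (pkt->m_flags & M_PKTHDR)
		pkt->m_pkthdr.len += fraglen;	/* m_cat never updates this */
}
#endif
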
void
m_adj(
	struct mbuf *mp,
	int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			m = mp;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while ((m = m->m_next))
			m->m_len = 0;
	}
}

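/*
 * Illustrative sketch, not part of the original file: m_adj() trims
 * from the head for a non-negative request and from the tail for a
 * negative one.  The 14- and 4-byte sizes are Ethernet-style
 * placeholders (link header and trailing FCS), not values taken from
 * this file.
 */
#if 0	/* example only, not compiled */
static void
example_adj(struct mbuf *m)
{
	m_adj(m, 14);	/* positive: drop a 14-byte link header from the head */
	m_adj(m, -4);	/* negative: drop a 4-byte FCS from the tail */
}
#endif
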
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(
	struct mbuf *n,
	int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

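/*
 * Illustrative sketch, not part of the original file: the classic
 * m_pullup() idiom before dereferencing a fixed-size header that must
 * be contiguous.  The 20-byte size stands in for an IPv4 header; the
 * function name is hypothetical.  Note that on failure the chain has
 * already been freed, so the caller must not touch it again.
 */
#if 0	/* example only, not compiled */
static struct mbuf *
example_pullup(struct mbuf *m)
{
	if (m->m_len < 20 && (m = m_pullup(m, 20)) == 0)
		return (0);	/* m_pullup freed the chain on failure */
	/* mtod(m, ...) is now safe for the first 20 bytes */
	return (m);
}
#endif
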
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(
	struct mbuf *m0,
	int len0,
	int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		MBUF_LOCK();
		n->m_ext = m->m_ext;
		insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
		MBUF_UNLOCK();
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
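
/*
 * Illustrative sketch, not part of the original file: splitting a
 * packet at a fragment boundary.  For M_PKTHDR chains m_split()
 * updates both headers' m_pkthdr.len fields itself; on failure it
 * returns NULL and, per the comment above, attempts to leave the
 * original chain restored.  "mtu" and the function name are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static struct mbuf *
example_split(struct mbuf *m, int mtu)
{
	struct mbuf *tail;

	tail = m_split(m, mtu, M_DONTWAIT);
	/* NULL means no memory; "m" still describes the whole packet. */
	return (tail);
}
#endif
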
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(
	char *buf,
	int totlen,
	int off0,
	struct ifnet *ifp,
	void (*copy)(const void *, void *, size_t))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else {
				/* give up when it's out of cluster mbufs */
				if (top)
					m_freem(top);
				m_freem(m);
				return (0);
			}
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

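/*
 * Illustrative sketch, not part of the original file: a driver's
 * receive path copying a frame out of device-local memory.  Passing a
 * NULL copy function makes m_devget() fall back to bcopy(), as seen
 * above.  "devmem", "framelen" and the function name are hypothetical.
 */
#if 0	/* example only, not compiled */
static struct mbuf *
example_devget(char *devmem, int framelen, struct ifnet *ifp)
{
	/* Returns a fresh chain with rcvif and pkthdr.len filled in,
	 * or NULL if mbufs or clusters ran out (drop the frame). */
	return (m_devget(devmem, framelen, 0, ifp, NULL));
}
#endif
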
/*
 * Cluster freelist allocation check.  The mbuf lock must be held.
 * Ensure hysteresis between hi/lo.
 */
static int
m_howmany(int num, size_t bufsize)
{
	int i = 0;

	/* Bail if we've maxed out the mbuf memory map */
	if (mbstat.m_clusters + (mbstat.m_bigclusters << 1) < nmbclusters) {
		int j = 0;

		if (bufsize == MCLBYTES) {
			/* Under minimum */
			if (mbstat.m_clusters < MINCL)
				return (MINCL - mbstat.m_clusters);
			/* Too few (free < 1/2 total) and not over maximum */
			if (mbstat.m_clusters < (nmbclusters >> 1)) {
				if (num >= mbstat.m_clfree)
					i = num - mbstat.m_clfree;
				if (((mbstat.m_clusters + num) >> 1) > mbstat.m_clfree)
					j = ((mbstat.m_clusters + num) >> 1) - mbstat.m_clfree;
				i = max(i, j);
				if (i + mbstat.m_clusters >= (nmbclusters >> 1))
					i = (nmbclusters >> 1) - mbstat.m_clusters;
			}
		} else {
			/* Under minimum */
			if (mbstat.m_bigclusters < MINCL)
				return (MINCL - mbstat.m_bigclusters);
			/* Too few (free < 1/2 total) and not over maximum */
			if (mbstat.m_bigclusters < (nmbclusters >> 2)) {
				if (num >= mbstat.m_bigclfree)
					i = num - mbstat.m_bigclfree;
				if (((mbstat.m_bigclusters + num) >> 1) > mbstat.m_bigclfree)
					j = ((mbstat.m_bigclusters + num) >> 1) - mbstat.m_bigclfree;
				i = max(i, j);
				if (i + mbstat.m_bigclusters >= (nmbclusters >> 2))
					i = (nmbclusters >> 2) - mbstat.m_bigclusters;
			}
		}
	}
	return i;
}

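/*
 * Worked example with illustrative numbers (not taken from this file):
 * for regular clusters with nmbclusters = 1024, m_clusters = 300,
 * m_clfree = 50 and a request of num = 60:
 *   i = 60 - 50 = 10                (cover the immediate shortfall)
 *   j = ((300 + 60) >> 1) - 50 = 130 (keep roughly half the pool free)
 *   i = max(10, 130) = 130
 * Since 130 + 300 = 430 < 512 (= nmbclusters >> 1), the cap does not
 * bite and m_howmany() returns 130 -- the hysteresis grows the pool
 * well past the request so it is not re-entered on every allocation.
 */
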
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(
	struct mbuf *m0,
	int off,
	int len,
	caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

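/*
 * Illustrative sketch, not part of the original file: rewriting a
 * field in place, e.g. storing a freshly computed 16-bit checksum at
 * a known offset in the packet.  "csum_off", the placeholder value and
 * the function name are hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_copyback(struct mbuf *m, int csum_off)
{
	u_int16_t csum = 0x1234;	/* placeholder checksum value */

	m_copyback(m, csum_off, sizeof (csum), (caddr_t)&csum);
}
#endif
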
char *mcl_to_paddr(char *addr) {
	int base_phys;

	if (addr < (char *)mbutl || addr >= (char *)embutl)
		return (0);
	base_phys = mcl_paddr[(addr - (char *)mbutl) >> PGSHIFT];

	if (base_phys == 0)
		return (0);
	return ((char *)((int)base_phys | ((int)addr & PGOFSET)));
}

/*
 * Dup the mbuf chain passed in.  The whole thing.  No cute additional cruft.
 * And really copy the thing.  That way, we don't "precompute" checksums
 * for unsuspecting consumers.
 * Assumption: m->m_nextpkt == 0.
 * Trick: for small packets, don't dup into a cluster.  That way received
 * packets don't take up too much room in the sockbuf (cf. sbspace()).
 */
int MDFail;

struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf *n, **np;
	struct mbuf *top;
	int copyhdr = 0;

	np = &top;
	top = 0;
	if (m->m_flags & M_PKTHDR)
		copyhdr = 1;

	/*
	 * Quick check: if we have one mbuf and its data fits in an
	 * mbuf with packet header, just copy and go.
	 */
	if (m->m_next == NULL) {
		/* Then just move the data into an mbuf and be done... */
		if (copyhdr) {
			if (m->m_pkthdr.len <= MHLEN) {
				if ((n = m_gethdr(how, m->m_type)) == NULL)
					return (NULL);
				n->m_len = m->m_len;
				m_dup_pkthdr(n, m, how);
				bcopy(m->m_data, n->m_data, m->m_len);
				return (n);
			}
		} else if (m->m_len <= MLEN) {
			if ((n = m_get(how, m->m_type)) == NULL)
				return (NULL);
			bcopy(m->m_data, n->m_data, m->m_len);
			n->m_len = m->m_len;
			return (n);
		}
	}
	while (m) {
#if BLUE_DEBUG
		kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
		    m->m_data);
#endif
		if (copyhdr)
			n = m_gethdr(how, m->m_type);
		else
			n = m_get(how, m->m_type);
		if (n == 0)
			goto nospace;
		if (m->m_flags & M_EXT) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0)
				goto nospace;
		}
		*np = n;
		if (copyhdr) {
			/* Don't use M_COPY_PKTHDR: preserve m_data */
			m_dup_pkthdr(n, m, how);
			copyhdr = 0;
			if ((n->m_flags & M_EXT) == 0)
				n->m_data = n->m_pktdat;
		}
		n->m_len = m->m_len;
		/*
		 * Get the dup on the same bdry as the original.
		 * Assume that the two mbufs have the same offset to data area
		 * (up to word bdries).
		 */
		bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len);
		m = m->m_next;
		np = &n->m_next;
#if BLUE_DEBUG
		kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
		    n->m_data);
#endif
	}

	if (top == 0)
		MDFail++;
	return (top);
nospace:
	m_freem(top);
	MDFail++;
	return (0);
}

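/*
 * Illustrative sketch, not part of the original file: taking a deep,
 * independent copy of a packet so a second consumer can modify it
 * without disturbing the original.  Unlike the cluster-sharing copy
 * routines above, m_dup() really copies the bytes.  The function name
 * is hypothetical.
 */
#if 0	/* example only, not compiled */
static struct mbuf *
example_dup(struct mbuf *m)
{
	/* NULL on failure; MDFail is bumped internally. */
	return (m_dup(m, M_DONTWAIT));
}
#endif
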
int
m_mclref(struct mbuf *p)
{
	return (_MCLREF(p));
}

int
m_mclunref(struct mbuf *p)
{
	return (_MCLUNREF(p));
}

/* change mbuf to new type */
void
m_mchtype(struct mbuf *m, int t)
{
	MBUF_LOCK();
	mbstat.m_mtypes[(m)->m_type]--;
	mbstat.m_mtypes[t]++;
	(m)->m_type = t;
	MBUF_UNLOCK();
}

void *m_mtod(struct mbuf *m)
{
	return ((m)->m_data);
}

struct mbuf *m_dtom(void *x)
{
	return ((struct mbuf *)((u_long)(x) & ~(MSIZE-1)));
}

int m_mtocl(void *x)
{
	return (((char *)(x) - (char *)mbutl) / sizeof(union mcluster));
}

union mcluster *m_cltom(int x)
{
	return ((union mcluster *)(mbutl + (x)));
}
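
/*
 * Worked example with illustrative numbers (not taken from this file):
 * m_dtom() relies on every mbuf being MSIZE-aligned within the mbuf
 * map.  Assuming MSIZE = 256, a data pointer of 0x10000428 masked with
 * ~(MSIZE-1) = ~0xff yields 0x10000400, the owning mbuf's base
 * address.  This only holds for data stored inside the mbuf itself,
 * not for external (M_EXT) cluster storage.
 */
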
void m_mcheck(struct mbuf *m)
{
	if (m->m_type != MT_FREE)
		panic("mget MCHECK: m_type=%x m=%x", m->m_type, m);
}

static void
mbuf_expand_thread(void)
{
	while (1) {
		MBUF_LOCK();
		if (mbuf_expand_mcl) {
			int n;

			/* Adjust to the current number of clusters in use */
			n = mbuf_expand_mcl - (mbstat.m_clusters - mbstat.m_clfree);
			mbuf_expand_mcl = 0;

			if (n > 0)
				(void)m_clalloc(n, M_WAIT, MCLBYTES, 1);
		}
		if (mbuf_expand_big) {
			int n;

			/* Adjust to the current number of 4 KB clusters in use */
			n = mbuf_expand_big - (mbstat.m_bigclusters - mbstat.m_bigclfree);
			mbuf_expand_big = 0;

			if (n > 0)
				(void)m_clalloc(n, M_WAIT, NBPG, 1);
		}
		MBUF_UNLOCK();
		/*
		 * Because we can run out of memory before filling the mbuf map, we
		 * should not allocate more clusters than there are mbufs -- otherwise
		 * we could have a large number of useless clusters allocated.
		 */
		while (mbstat.m_mbufs < mbstat.m_bigclusters + mbstat.m_clusters) {
			if (m_expand(M_WAIT) == 0)
				break;
		}

		assert_wait(&mbuf_expand_thread_wakeup, THREAD_UNINT);
		(void) thread_block((thread_continue_t)mbuf_expand_thread);
	}
}

static void
mbuf_expand_thread_init(void)
{
	mbuf_expand_thread_initialized++;
	mbuf_expand_thread();
}

SYSCTL_DECL(_kern_ipc);
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");