/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/queue.h>
#include <sys/proc.h>

#include <dev/random/randomdev.h>

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/backtrace.h>
#include <kern/cpu_number.h>
#include <kern/zalloc.h>

#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>
#include <libkern/libkern.h>

#include <IOKit/IOMapper.h>

#include <machine/limits.h>
#include <machine/machine_routines.h>

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */

#include <sys/mcache.h>
#include <net/ntstat.h>

/*
 * MBUF IMPLEMENTATION NOTES.
 *
 * There is a total of 5 per-CPU caches:
 *
 * MC_MBUF:
 *	This is a cache of rudimentary objects of MSIZE in size; each
 *	object represents an mbuf structure.  This cache preserves only
 *	the m_type field of the mbuf during its transactions.
 *
 * MC_CL:
 *	This is a cache of rudimentary objects of MCLBYTES in size; each
 *	object represents a mcluster structure.  This cache does not
 *	preserve the contents of the objects during its transactions.
 *
 * MC_BIGCL:
 *	This is a cache of rudimentary objects of MBIGCLBYTES in size; each
 *	object represents a mbigcluster structure.  This cache does not
 *	preserve the contents of the objects during its transactions.
 *
 * MC_MBUF_CL:
 *	This is a cache of mbufs each having a cluster attached to it.
 *	It is backed by MC_MBUF and MC_CL rudimentary caches.  Several
 *	fields of the mbuf related to the external cluster are preserved
 *	during transactions.
 *
 * MC_MBUF_BIGCL:
 *	This is a cache of mbufs each having a big cluster attached to it.
 *	It is backed by MC_MBUF and MC_BIGCL rudimentary caches.  Several
 *	fields of the mbuf related to the external cluster are preserved
 *	during transactions.
 *
 * OBJECT ALLOCATION:
 *
 * Allocation requests are handled first at the per-CPU (mcache) layer
 * before falling back to the slab layer.  Performance is optimal when
 * the request is satisfied at the CPU layer because global data/lock
 * never gets accessed.  When the slab layer is entered for allocation,
 * the slab freelist will be checked first for available objects before
 * the VM backing store is invoked.  Slab layer operations are serialized
 * for all of the caches as the mbuf global lock is held most of the time.
 * Allocation paths are different depending on the class of objects:
 *
 * a. Rudimentary object:
 *
 *	{ m_get_common(), m_clattach(), m_mclget(),
 *	  m_mclalloc(), m_bigalloc(), m_copym_with_hdrs(),
 *	  composite object allocation }
 *
 *			|	^
 *			|	|
 *			|	+-----------------------+
 *			v				|
 *	   mcache_alloc/mcache_alloc_ext()	mbuf_slab_audit()
 *			|				^
 *			v				|
 *		   [CPU cache] -------> (found?) -------+
 *			|				|
 *			v				|
 *		 mbuf_slab_alloc()			|
 *			|				|
 *			v				|
 *	+---------> [freelist] -------> (found?) -------+
 *	|		|
 *	|		v
 *	|	    m_clalloc()
 *	|		|
 *	|		v
 *	+---<<---- kmem_mb_alloc()
 *
 * b. Composite object:
 *
 *	{ m_getpackets_internal(), m_allocpacket_internal() }
 *
 *			|	^
 *			|	|
 *			|	+------ (done) ---------+
 *			v				|
 *	   mcache_alloc/mcache_alloc_ext()	mbuf_cslab_audit()
 *			|				^
 *			v				|
 *		   [CPU cache] -------> (found?) -------+
 *			|				|
 *			v				|
 *		 mbuf_cslab_alloc()			|
 *			|				|
 *			v				|
 *		    [freelist] -------> (found?) -------+
 *			|				|
 *			v				|
 *		(rudimentary object)			|
 *	   mcache_alloc/mcache_alloc_ext() ------>>-----+
 *
 * Auditing notes: If auditing is enabled, buffers will be subjected to
 * integrity checks by the audit routine.  This is done by verifying their
 * contents against the DEADBEEF (free) pattern before returning them to
 * the caller.  As part of this step, the routine will also record the
 * transaction and pattern-fill the buffers with the BADDCAFE
 * (uninitialized) pattern.  It will also restore any constructed data
 * structure fields if necessary.
 *
 * OBJECT DEALLOCATION:
 *
 * Freeing an object simply involves placing it into the CPU cache; this
 * pollutes the cache to benefit subsequent allocations.  The slab layer
 * will only be entered if the object is to be purged out of the cache.
 * During normal operations, this happens only when the CPU layer resizes
 * its bucket while it's adjusting to the allocation load.  Deallocation
 * paths are different depending on the class of objects:
 *
 * a. Rudimentary object:
 *
 *	{ m_free(), m_freem_list(), composite object deallocation }
 *
 *			|	^
 *			|	|
 *			|	+------ (done) ---------+
 *			v				|
 *	   mcache_free/mcache_free_ext()		|
 *			|				|
 *			v				|
 *		mbuf_slab_audit()			|
 *			|				|
 *			v				|
 *		   [CPU cache] ---> (not purging?) -----+
 *			|				|
 *			v				|
 *		 mbuf_slab_free()			|
 *			|				|
 *			v				|
 *		    [freelist] ----------->>------------+
 *	 (objects get purged to VM only on demand)
 *
 * b. Composite object:
 *
 *	{ m_free(), m_freem_list() }
 *
 *			|	^
 *			|	|
 *			|	+------ (done) ---------+
 *			v				|
 *	   mcache_free/mcache_free_ext()		|
 *			|				|
 *			v				|
 *		mbuf_cslab_audit()			|
 *			|				|
 *			v				|
 *		   [CPU cache] ---> (not purging?) -----+
 *			|				|
 *			v				|
 *		 mbuf_cslab_free()			|
 *			|				|
 *			v				|
 *		    [freelist] ---> (not purging?) -----+
 *			|				|
 *			v				|
 *		(rudimentary object)			|
 *	   mcache_free/mcache_free_ext() ------->>------+
 *
 * Auditing notes: If auditing is enabled, the audit routine will save
 * any constructed data structure fields (if necessary) before filling the
 * contents of the buffers with the DEADBEEF (free) pattern and recording
 * the transaction.  Buffers that are freed (whether at CPU or slab layer)
 * are expected to contain the free pattern.
 *
 * DEBUGGING:
 *
 * Debugging can be enabled by adding "mbuf_debug=0x3" to boot-args; this
 * translates to the mcache flags (MCF_VERIFY | MCF_AUDIT).  Additionally,
 * the CPU layer cache can be disabled by setting the MCF_NOCPUCACHE flag,
 * i.e. modify the boot argument parameter to "mbuf_debug=0x13".  Leak
 * detection may also be disabled by setting the MCF_NOLEAKLOG flag, e.g.
 * "mbuf_debug=0x113".  Note that debugging consumes more CPU and memory.
 *
 * Each object is associated with exactly one mcache_audit_t structure that
 * contains the information related to its last buffer transaction.  Given
 * an address of an object, the audit structure can be retrieved by finding
 * the position of the object relative to the base address of the cluster:
 *
 *	+------------+			+=============+
 *	| mbuf addr  |			| mclaudit[i] |
 *	+------------+			+=============+
 *	      |				| cl_audit[0] |
 *	i = MTOBG(addr)			+-------------+
 *	      |			+----->	| cl_audit[1] | -----> mcache_audit_t
 *	b = BGTOM(i)		|	+-------------+
 *	      |			|	|     ...     |
 *	x = MCLIDX(b, addr)	|	+-------------+
 *	      |			|	| cl_audit[7] |
 *	+-----------------------+	+-------------+
 *		 (e.g. x == 1)
 *
 * The mclaudit[] array is allocated at initialization time, but its contents
 * get populated when the corresponding cluster is created.  Because a page
 * can be turned into NMBPG mbufs, we preserve enough space for the mbufs so
 * that there is a 1-to-1 mapping between them.  A page that never gets (or
 * has not yet been) turned into mbufs will use only cl_audit[0] with the
 * remaining entries unused.  For a 16KB cluster, only one entry from the
 * first page is allocated and used for the entire object.
 */
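
/*
 * Worked example of the lookup above (illustrative, assuming a 4KB page
 * holding 16 mbufs of MSIZE 256 bytes): for an mbuf starting 256 bytes
 * into the second mapped page,
 *
 *	i = MTOBG(addr)		yields page index 1
 *	b = BGTOM(i)		yields that page's base address
 *	x = MCLIDX(b, addr)	yields object index 1 within the page
 *
 * so its audit record is mclaudit[1].cl_audit[1].
 */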

/* TODO: should be in header file */
/* kernel translator */
extern vm_offset_t kmem_mb_alloc(vm_map_t, int, int);
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern vm_map_t mb_map;		/* special map */

/* Global lock */
decl_lck_mtx_data(static, mbuf_mlock_data);
static lck_mtx_t *mbuf_mlock = &mbuf_mlock_data;
static lck_attr_t *mbuf_mlock_attr;
static lck_grp_t *mbuf_mlock_grp;
static lck_grp_attr_t *mbuf_mlock_grp_attr;

/* Back-end (common) layer */
static boolean_t mbuf_worker_needs_wakeup; /* wait channel for mbuf worker */
static int mbuf_worker_ready;	/* worker thread is runnable */
static int mbuf_expand_mcl;	/* number of cluster creation requests */
static int mbuf_expand_big;	/* number of big cluster creation requests */
static int mbuf_expand_16k;	/* number of 16KB cluster creation requests */
static int ncpu;		/* number of CPUs */
static ppnum_t *mcl_paddr;	/* Array of cluster physical addresses */
static ppnum_t mcl_pages;	/* Size of array (# physical pages) */
static ppnum_t mcl_paddr_base;	/* Handle returned by IOMapper::iovmAlloc() */
static mcache_t *ref_cache;	/* Cache of cluster reference & flags */
static mcache_t *mcl_audit_con_cache; /* Audit contents cache */
static unsigned int mbuf_debug;	/* patchable mbuf mcache flags */
static unsigned int mb_normalized; /* number of packets "normalized" */

#define	MB_GROWTH_AGGRESSIVE	1	/* Threshold: 1/2 of total */
#define	MB_GROWTH_NORMAL	2	/* Threshold: 3/4 of total */

typedef enum {
	MC_MBUF = 0,	/* Regular mbuf */
	MC_CL,		/* Cluster */
	MC_BIGCL,	/* Large (4KB) cluster */
	MC_16KCL,	/* Jumbo (16KB) cluster */
	MC_MBUF_CL,	/* mbuf + cluster */
	MC_MBUF_BIGCL,	/* mbuf + large (4KB) cluster */
	MC_MBUF_16KCL	/* mbuf + jumbo (16KB) cluster */
} mbuf_class_t;

#define	MBUF_CLASS_MIN		MC_MBUF
#define	MBUF_CLASS_MAX		MC_MBUF_16KCL
#define	MBUF_CLASS_LAST		MC_16KCL
#define	MBUF_CLASS_VALID(c) \
	((int)(c) >= MBUF_CLASS_MIN && (int)(c) <= MBUF_CLASS_MAX)
#define	MBUF_CLASS_COMPOSITE(c) \
	((int)(c) > MBUF_CLASS_LAST)
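
/*
 * For example, MBUF_CLASS_COMPOSITE(MC_MBUF_CL) is true because the
 * composite classes are enumerated after MBUF_CLASS_LAST (MC_16KCL),
 * while MBUF_CLASS_COMPOSITE(MC_BIGCL) is false; the rudimentary
 * classes come first in mbuf_class_t.
 */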

/*
 * mbuf specific mcache allocation request flags.
 */
#define	MCR_COMP	MCR_USR1	/* for MC_MBUF_{CL,BIGCL,16KCL} caches */

/*
 * Per-cluster slab structure.
 *
 * A slab is a cluster control structure that contains one or more object
 * chunks; the available chunks are chained in the slab's freelist (sl_head).
 * Each time a chunk is taken out of the slab, the slab's reference count
 * gets incremented.  When all chunks have been taken out, the empty slab
 * gets removed (SLF_DETACHED) from the class's slab list.  A chunk that is
 * returned to a slab causes the slab's reference count to be decremented;
 * it also causes the slab to be reinserted into the class's slab list, if
 * it's not already there.
 *
 * Compartmentalizing the object chunks into slabs allows us to easily
 * merge one or more slabs together when the adjacent slabs are idle, as
 * well as to convert or move a slab from one class to another; e.g. the
 * mbuf cluster slab can be converted to a regular cluster slab when all
 * mbufs in the slab have been freed.
 *
 * A slab may also span multiple clusters for chunks larger than
 * a cluster's size.  In this case, only the slab of the first cluster is
 * used.  The rest of the slabs are marked with SLF_PARTIAL to indicate
 * that they are part of the larger slab.
 *
 * Each slab controls a page of memory.
 */
typedef struct mcl_slab {
	struct mcl_slab	*sl_next;	/* neighboring slab */
	u_int8_t	sl_class;	/* controlling mbuf class */
	int8_t		sl_refcnt;	/* outstanding allocations */
	int8_t		sl_chunks;	/* chunks (bufs) in this slab */
	u_int16_t	sl_flags;	/* slab flags (see below) */
	u_int16_t	sl_len;		/* slab length */
	void		*sl_base;	/* base of allocated memory */
	void		*sl_head;	/* first free buffer */
	TAILQ_ENTRY(mcl_slab) sl_link;	/* next/prev slab on freelist */
} mcl_slab_t;

#define	SLF_MAPPED	0x0001		/* backed by a mapped page */
#define	SLF_PARTIAL	0x0002		/* part of another slab */
#define	SLF_DETACHED	0x0004		/* not in slab freelist */

/*
 * The array of slabs is broken into groups of arrays per 1MB of kernel
 * memory to reduce the footprint.  Each group is allocated on demand
 * whenever a new piece of memory mapped in from the VM crosses the 1MB
 * boundary.
 */
#define	NSLABSPMB	((1 << MBSHIFT) >> PAGE_SHIFT)

typedef struct mcl_slabg {
	mcl_slab_t	*slg_slab;	/* group of slabs */
} mcl_slabg_t;

/*
 * Number of slabs needed to control a 16KB cluster object.
 */
#define	NSLABSP16KB	(M16KCLBYTES >> PAGE_SHIFT)
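
/*
 * Worked example under an assumed 4KB page size (PAGE_SHIFT of 12):
 * with MBSHIFT of 20, NSLABSPMB is (1MB >> 12) == 256 slabs per group,
 * and NSLABSP16KB is (16384 >> 12) == 4 slabs per 16KB cluster.  On a
 * 16KB-page platform the same formulas yield 64 and 1, respectively.
 */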

/*
 * Per-cluster audit structure.
 */
typedef struct {
	mcache_audit_t	**cl_audit;	/* array of audits */
} mcl_audit_t;

typedef struct {
	struct thread	*msa_thread;	/* thread doing transaction */
	struct thread	*msa_pthread;	/* previous transaction thread */
	uint32_t	msa_tstamp;	/* transaction timestamp (ms) */
	uint32_t	msa_ptstamp;	/* prev transaction timestamp (ms) */
	uint16_t	msa_depth;	/* pc stack depth */
	uint16_t	msa_pdepth;	/* previous transaction pc stack */
	void		*msa_stack[MCACHE_STACK_DEPTH];
	void		*msa_pstack[MCACHE_STACK_DEPTH];
} mcl_scratch_audit_t;

typedef struct {
	/*
	 * Size of data from the beginning of an mbuf that covers m_hdr,
	 * pkthdr and m_ext structures.  If auditing is enabled, we allocate
	 * a shadow mbuf structure of this size inside each audit structure,
	 * and the contents of the real mbuf gets copied into it when the mbuf
	 * is freed.  This allows us to pattern-fill the mbuf for integrity
	 * check, and to preserve any constructed mbuf fields (e.g. mbuf +
	 * cluster cache case).  Note that we don't save the contents of
	 * clusters when they are freed; we simply pattern-fill them.
	 */
	u_int8_t	sc_mbuf[(MSIZE - _MHLEN) + sizeof (_m_ext_t)];
	mcl_scratch_audit_t	sc_scratch __attribute__((aligned(8)));
} mcl_saved_contents_t;

#define	AUDIT_CONTENTS_SIZE	(sizeof (mcl_saved_contents_t))

#define	MCA_SAVED_MBUF_PTR(_mca)					\
	((struct mbuf *)(void *)((mcl_saved_contents_t *)		\
	(_mca)->mca_contents)->sc_mbuf)
#define	MCA_SAVED_MBUF_SIZE						\
	(sizeof (((mcl_saved_contents_t *)0)->sc_mbuf))
#define	MCA_SAVED_SCRATCH_PTR(_mca)					\
	(&((mcl_saved_contents_t *)(_mca)->mca_contents)->sc_scratch)

/*
 * mbuf specific mcache audit flags
 */
#define	MB_INUSE	0x01	/* object has not been returned to slab */
#define	MB_COMP_INUSE	0x02	/* object has not been returned to cslab */
#define	MB_SCVALID	0x04	/* object has valid saved contents */

/*
 * Each of the following two arrays holds up to nmbclusters elements.
 */
static mcl_audit_t *mclaudit;	/* array of cluster audit information */
static unsigned int maxclaudit;	/* max # of entries in audit table */
static mcl_slabg_t **slabstbl;	/* cluster slabs table */
static unsigned int maxslabgrp;	/* max # of entries in slabs table */
static unsigned int slabgrp;	/* # of entries in slabs table */

/* Globals */
int nclusters;			/* # of clusters for non-jumbo (legacy) sizes */
int njcl;			/* # of clusters for jumbo sizes */
int njclbytes;			/* size of a jumbo cluster */
unsigned char *mbutl;		/* first mapped cluster address */
unsigned char *embutl;		/* ending virtual address of mclusters */
int _max_linkhdr;		/* largest link-level header */
int _max_protohdr;		/* largest protocol header */
int max_hdr;			/* largest link+protocol header */
int max_datalen;		/* MHLEN - max_hdr */

static boolean_t mclverify;	/* debug: pattern-checking */
static boolean_t mcltrace;	/* debug: stack tracing */
static boolean_t mclfindleak;	/* debug: leak detection */
static boolean_t mclexpleak;	/* debug: expose leak info to user space */

static struct timeval mb_start;	/* beginning of time */

/* mbuf leak detection variables */
static struct mleak_table mleak_table;
static mleak_stat_t *mleak_stat;

#define	MLEAK_STAT_SIZE(n) \
	((size_t)(&((mleak_stat_t *)0)->ml_trace[n]))

struct mallocation {
	mcache_obj_t *element;	/* the alloc'ed element, NULL if unused */
	u_int32_t trace_index;	/* mtrace index for corresponding backtrace */
	u_int32_t count;	/* How many objects were requested */
	u_int64_t hitcount;	/* for determining hash effectiveness */
};

struct mtrace {
	u_int64_t	collisions;
	u_int64_t	hitcount;
	u_int64_t	allocs;
	u_int64_t	depth;
	uintptr_t	addr[MLEAK_STACK_DEPTH];
};

/* Size must be a power of two for the zhash to be able to just mask off bits */
#define	MLEAK_ALLOCATION_MAP_NUM	512
#define	MLEAK_TRACE_MAP_NUM		256

/*
 * Sample factor for how often to record a trace.  This is overridable
 * by the boot-arg mleak_sample_factor.
 */
#define	MLEAK_SAMPLE_FACTOR		500

/*
 * Number of top leakers recorded.
 */
#define	MLEAK_NUM_TRACES		5
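
/*
 * For example (illustrative), booting with "mleak_sample_factor=100" in
 * boot-args lowers the factor from the default 500, making the leak
 * detector record traces more frequently at the cost of extra CPU and
 * memory; the precise sampling scheme is defined where the factor is
 * consumed by the allocation path.
 */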

#define	MB_LEAK_SPACING_64	"                    "
#define	MB_LEAK_SPACING_32	"            "


#define	MB_LEAK_HDR_32	"\n\
    trace [1]   trace [2]   trace [3]   trace [4]   trace [5]  \n\
    ----------  ----------  ----------  ----------  ---------- \n\
"

#define	MB_LEAK_HDR_64	"\n\
    trace [1]           trace [2]           trace [3]       \
        trace [4]           trace [5]      \n\
    ------------------  ------------------  ------------------  \
    ------------------  ------------------ \n\
"

static uint32_t mleak_alloc_buckets = MLEAK_ALLOCATION_MAP_NUM;
static uint32_t mleak_trace_buckets = MLEAK_TRACE_MAP_NUM;

/* Hashmaps of allocations and their corresponding traces */
static struct mallocation *mleak_allocations;
static struct mtrace *mleak_traces;
static struct mtrace *mleak_top_trace[MLEAK_NUM_TRACES];

/* Lock to protect mleak tables from concurrent modification */
decl_lck_mtx_data(static, mleak_lock_data);
static lck_mtx_t *mleak_lock = &mleak_lock_data;
static lck_attr_t *mleak_lock_attr;
static lck_grp_t *mleak_lock_grp;
static lck_grp_attr_t *mleak_lock_grp_attr;

/* Lock to protect the completion callback table */
static lck_grp_attr_t *mbuf_tx_compl_tbl_lck_grp_attr = NULL;
static lck_attr_t *mbuf_tx_compl_tbl_lck_attr = NULL;
static lck_grp_t *mbuf_tx_compl_tbl_lck_grp = NULL;
decl_lck_rw_data(, mbuf_tx_compl_tbl_lck_rw_data);
lck_rw_t *mbuf_tx_compl_tbl_lock = &mbuf_tx_compl_tbl_lck_rw_data;

extern u_int32_t high_sb_max;

/* The minimum number of objects that are allocated, to start. */
#define	MINCL		32
#define	MINBIGCL	(MINCL >> 1)
#define	MIN16KCL	(MINCL >> 2)

/* Low watermarks (only map in pages once free counts go below) */
#define	MBIGCL_LOWAT	MINBIGCL
#define	M16KCL_LOWAT	MIN16KCL

typedef struct {
	mbuf_class_t	mtbl_class;	/* class type */
	mcache_t	*mtbl_cache;	/* mcache for this buffer class */
	TAILQ_HEAD(mcl_slhead, mcl_slab) mtbl_slablist; /* slab list */
	mcache_obj_t	*mtbl_cobjlist;	/* composite objects freelist */
	mb_class_stat_t	*mtbl_stats;	/* statistics fetchable via sysctl */
	u_int32_t	mtbl_maxsize;	/* maximum buffer size */
	int		mtbl_minlimit;	/* minimum allowed */
	int		mtbl_maxlimit;	/* maximum allowed */
	u_int32_t	mtbl_wantpurge;	/* purge during next reclaim */
	uint32_t	mtbl_avgtotal;	/* average total on iOS */
} mbuf_table_t;

#define	m_class(c)	mbuf_table[c].mtbl_class
#define	m_cache(c)	mbuf_table[c].mtbl_cache
#define	m_slablist(c)	mbuf_table[c].mtbl_slablist
#define	m_cobjlist(c)	mbuf_table[c].mtbl_cobjlist
#define	m_maxsize(c)	mbuf_table[c].mtbl_maxsize
#define	m_minlimit(c)	mbuf_table[c].mtbl_minlimit
#define	m_maxlimit(c)	mbuf_table[c].mtbl_maxlimit
#define	m_wantpurge(c)	mbuf_table[c].mtbl_wantpurge
#define	m_avgtotal(c)	mbuf_table[c].mtbl_avgtotal
#define	m_cname(c)	mbuf_table[c].mtbl_stats->mbcl_cname
#define	m_size(c)	mbuf_table[c].mtbl_stats->mbcl_size
#define	m_total(c)	mbuf_table[c].mtbl_stats->mbcl_total
#define	m_active(c)	mbuf_table[c].mtbl_stats->mbcl_active
#define	m_infree(c)	mbuf_table[c].mtbl_stats->mbcl_infree
#define	m_slab_cnt(c)	mbuf_table[c].mtbl_stats->mbcl_slab_cnt
#define	m_alloc_cnt(c)	mbuf_table[c].mtbl_stats->mbcl_alloc_cnt
#define	m_free_cnt(c)	mbuf_table[c].mtbl_stats->mbcl_free_cnt
#define	m_notified(c)	mbuf_table[c].mtbl_stats->mbcl_notified
#define	m_purge_cnt(c)	mbuf_table[c].mtbl_stats->mbcl_purge_cnt
#define	m_fail_cnt(c)	mbuf_table[c].mtbl_stats->mbcl_fail_cnt
#define	m_ctotal(c)	mbuf_table[c].mtbl_stats->mbcl_ctotal
#define	m_peak(c)	mbuf_table[c].mtbl_stats->mbcl_peak_reported
#define	m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt

static mbuf_table_t mbuf_table[] = {
	/*
	 * The caches for mbufs, regular clusters and big clusters.
	 * The average total values were based on data gathered by actual
	 * usage patterns on iOS.
	 */
	{ MC_MBUF, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_MBUF)),
	    NULL, NULL, 0, 0, 0, 0, 3000 },
	{ MC_CL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_CL)),
	    NULL, NULL, 0, 0, 0, 0, 2000 },
	{ MC_BIGCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_BIGCL)),
	    NULL, NULL, 0, 0, 0, 0, 1000 },
	{ MC_16KCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_16KCL)),
	    NULL, NULL, 0, 0, 0, 0, 1000 },
	/*
	 * The following are special caches; they serve as intermediate
	 * caches backed by the above rudimentary caches.  Each object
	 * in the cache is an mbuf with a cluster attached to it.  Unlike
	 * the above caches, these intermediate caches do not directly
	 * deal with the slab structures; instead, the constructed
	 * cached elements are simply stored in the freelists.
	 */
	{ MC_MBUF_CL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 2000 },
	{ MC_MBUF_BIGCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000 },
	{ MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000 },
};

#define	NELEM(a)	(sizeof (a) / sizeof ((a)[0]))

static void *mb_waitchan = &mbuf_table;	/* wait channel for all caches */
static int mb_waiters;			/* number of waiters */

boolean_t mb_peak_newreport = FALSE;
boolean_t mb_peak_firstreport = FALSE;

/* generate a report by default after 1 week of uptime */
#define	MBUF_PEAK_FIRST_REPORT_THRESHOLD	604800

#define	MB_WDT_MAXTIME	10		/* # of secs before watchdog panic */
static struct timeval mb_wdtstart;	/* watchdog start timestamp */
static char *mbuf_dump_buf;

#define	MBUF_DUMP_BUF_SIZE	2048

/*
 * The mbuf watchdog is enabled by default on embedded platforms.  It is
 * also toggleable via the kern.ipc.mb_watchdog sysctl.
 * Garbage collection is also enabled by default on embedded platforms.
 * mb_drain_maxint controls the amount of time to wait (in seconds)
 * between consecutive calls to m_drain().
 */
static unsigned int mb_watchdog = 0;
static unsigned int mb_drain_maxint = 0;

uintptr_t mb_obscure_extfree __attribute__((visibility("hidden")));
uintptr_t mb_obscure_extref __attribute__((visibility("hidden")));

/* Red zone */
static u_int32_t mb_redzone_cookie;
static void m_redzone_init(struct mbuf *);
static void m_redzone_verify(struct mbuf *m);

/* The following are used to serialize m_clalloc() */
static boolean_t mb_clalloc_busy;
static void *mb_clalloc_waitchan = &mb_clalloc_busy;
static int mb_clalloc_waiters;

static void mbuf_mtypes_sync(boolean_t);
static int mbstat_sysctl SYSCTL_HANDLER_ARGS;
static void mbuf_stat_sync(void);
static int mb_stat_sysctl SYSCTL_HANDLER_ARGS;
static int mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS;
static int mleak_table_sysctl SYSCTL_HANDLER_ARGS;
static char *mbuf_dump(void);
static void mbuf_table_init(void);
static inline void m_incref(struct mbuf *);
static inline u_int16_t m_decref(struct mbuf *);
static int m_clalloc(const u_int32_t, const int, const u_int32_t);
static void mbuf_worker_thread_init(void);
static mcache_obj_t *slab_alloc(mbuf_class_t, int);
static void slab_free(mbuf_class_t, mcache_obj_t *);
static unsigned int mbuf_slab_alloc(void *, mcache_obj_t ***,
    unsigned int, int);
static void mbuf_slab_free(void *, mcache_obj_t *, int);
static void mbuf_slab_audit(void *, mcache_obj_t *, boolean_t);
static void mbuf_slab_notify(void *, u_int32_t);
static unsigned int cslab_alloc(mbuf_class_t, mcache_obj_t ***,
    unsigned int);
static unsigned int cslab_free(mbuf_class_t, mcache_obj_t *, int);
static unsigned int mbuf_cslab_alloc(void *, mcache_obj_t ***,
    unsigned int, int);
static void mbuf_cslab_free(void *, mcache_obj_t *, int);
static void mbuf_cslab_audit(void *, mcache_obj_t *, boolean_t);
static int freelist_populate(mbuf_class_t, unsigned int, int);
static void freelist_init(mbuf_class_t);
static boolean_t mbuf_cached_above(mbuf_class_t, int);
static boolean_t mbuf_steal(mbuf_class_t, unsigned int);
static void m_reclaim(mbuf_class_t, unsigned int, boolean_t);
static int m_howmany(int, size_t);
static void mbuf_worker_thread(void);
static void mbuf_watchdog(void);
static boolean_t mbuf_sleep(mbuf_class_t, unsigned int, int);

static void mcl_audit_init(void *, mcache_audit_t **, mcache_obj_t **,
    size_t, unsigned int);
static void mcl_audit_free(void *, unsigned int);
static mcache_audit_t *mcl_audit_buf2mca(mbuf_class_t, mcache_obj_t *);
static void mcl_audit_mbuf(mcache_audit_t *, void *, boolean_t, boolean_t);
static void mcl_audit_cluster(mcache_audit_t *, void *, size_t, boolean_t,
    boolean_t);
static void mcl_audit_restore_mbuf(struct mbuf *, mcache_audit_t *, boolean_t);
static void mcl_audit_save_mbuf(struct mbuf *, mcache_audit_t *);
static void mcl_audit_scratch(mcache_audit_t *);
static void mcl_audit_mcheck_panic(struct mbuf *);
static void mcl_audit_verify_nextptr(void *, mcache_audit_t *);

static void mleak_activate(void);
static void mleak_logger(u_int32_t, mcache_obj_t *, boolean_t);
static boolean_t mleak_log(uintptr_t *, mcache_obj_t *, uint32_t, int);
static void mleak_free(mcache_obj_t *);
static void mleak_sort_traces(void);
static void mleak_update_stats(void);

static mcl_slab_t *slab_get(void *);
static void slab_init(mcl_slab_t *, mbuf_class_t, u_int32_t,
    void *, void *, unsigned int, int, int);
static void slab_insert(mcl_slab_t *, mbuf_class_t);
static void slab_remove(mcl_slab_t *, mbuf_class_t);
static boolean_t slab_inrange(mcl_slab_t *, void *);
static void slab_nextptr_panic(mcl_slab_t *, void *);
static void slab_detach(mcl_slab_t *);
static boolean_t slab_is_detached(mcl_slab_t *);

static int m_copyback0(struct mbuf **, int, int, const void *, int, int);
static struct mbuf *m_split0(struct mbuf *, int, int, int);
__private_extern__ void mbuf_report_peak_usage(void);
static boolean_t mbuf_report_usage(mbuf_class_t);

/* flags for m_copyback0 */
#define	M_COPYBACK0_COPYBACK	0x0001	/* copyback from cp */
#define	M_COPYBACK0_PRESERVE	0x0002	/* preserve original data */
#define	M_COPYBACK0_COW		0x0004	/* do copy-on-write */
#define	M_COPYBACK0_EXTEND	0x0008	/* extend chain */

/*
 * This flag is set for all mbufs that come out of and into the composite
 * mbuf + cluster caches, i.e. MC_MBUF_CL and MC_MBUF_BIGCL.  mbufs that
 * are marked with such a flag have clusters attached to them, and will be
 * treated differently when they are freed; instead of being placed back
 * into the mbuf and cluster freelists, the composite mbuf + cluster objects
 * are placed back into the appropriate composite cache's freelist, and the
 * actual freeing is deferred until the composite objects are purged.  At
 * such a time, this flag will be cleared from the mbufs and the objects
 * will be freed into their own separate freelists.
 */
#define	EXTF_COMPOSITE	0x1

/*
 * This flag indicates that the external cluster is read-only, i.e. it is
 * or was referred to by more than one mbuf.  Once set, this flag is never
 * cleared.
 */
#define	EXTF_READONLY	0x2
/*
 * This flag indicates that the external cluster is paired with the mbuf.
 * Pairing implies an external free routine defined which will be invoked
 * when the reference count drops to the minimum at m_free time.  This
 * flag is never cleared.
 */
#define	EXTF_PAIRED	0x4

#define	EXTF_MASK \
	(EXTF_COMPOSITE | EXTF_READONLY | EXTF_PAIRED)

#define	MEXT_MINREF(m)		((m_get_rfa(m))->minref)
#define	MEXT_REF(m)		((m_get_rfa(m))->refcnt)
#define	MEXT_PREF(m)		((m_get_rfa(m))->prefcnt)
#define	MEXT_FLAGS(m)		((m_get_rfa(m))->flags)
#define	MEXT_PRIV(m)		((m_get_rfa(m))->priv)
#define	MEXT_PMBUF(m)		((m_get_rfa(m))->paired)
#define	MEXT_TOKEN(m)		((m_get_rfa(m))->ext_token)

#define	MBUF_IS_COMPOSITE(m) \
	(MEXT_REF(m) == MEXT_MINREF(m) && \
	(MEXT_FLAGS(m) & EXTF_MASK) == EXTF_COMPOSITE)
/*
 * This macro can be used to test if the mbuf is paired to an external
 * cluster.  The test for MEXT_PMBUF being equal to the mbuf in subject
 * is important, as EXTF_PAIRED alone is insufficient since it is immutable,
 * and thus survives calls to m_free_paired.
 */
#define	MBUF_IS_PAIRED(m) \
	(((m)->m_flags & M_EXT) && \
	(MEXT_FLAGS(m) & EXTF_MASK) == EXTF_PAIRED && \
	MEXT_PMBUF(m) == (m))

/*
 * Macros used to verify the integrity of the mbuf.
 */
#define	_MCHECK(m) {							\
	if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) {		\
		if (mclaudit == NULL)					\
			panic("MCHECK: m_type=%d m=%p",			\
			    (u_int16_t)(m)->m_type, m);			\
		else							\
			mcl_audit_mcheck_panic(m);			\
	}								\
}

#define	MBUF_IN_MAP(addr) \
	((unsigned char *)(addr) >= mbutl && \
	(unsigned char *)(addr) < embutl)

#define	MRANGE(addr) {							\
	if (!MBUF_IN_MAP(addr))						\
		panic("MRANGE: address out of range 0x%p", addr);	\
}

/*
 * Macro version of mtod.
 */
#define	MTOD(m, t)	((t)((m)->m_data))
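
/*
 * Usage sketch (illustrative): MTOD casts the current data pointer of
 * an mbuf to the type being parsed, with no bounds checking, e.g.
 *
 *	struct ip *ip = MTOD(m, struct ip *);
 *
 * The caller must ensure that m_data points to at least that many
 * contiguous valid bytes (e.g. after a pullup).
 */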

/*
 * Macros to obtain the page index given a base cluster address.
 */
#define	MTOPG(x)	(((unsigned char *)x - mbutl) >> PAGE_SHIFT)
#define	PGTOM(x)	(mbutl + (x << PAGE_SHIFT))

/*
 * Macro to find the mbuf index relative to a base.
 */
#define	MBPAGEIDX(c, m) \
	(((unsigned char *)(m) - (unsigned char *)(c)) >> MSIZESHIFT)

/*
 * Same thing for the 2KB cluster index.
 */
#define	CLPAGEIDX(c, m) \
	(((unsigned char *)(m) - (unsigned char *)(c)) >> MCLSHIFT)

/*
 * Macro to find the 4KB cluster index relative to a base.
 */
#define	BCLPAGEIDX(c, m) \
	(((unsigned char *)(m) - (unsigned char *)(c)) >> MBIGCLSHIFT)
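
/*
 * Worked example (assumed constants: MSIZESHIFT 8, MCLSHIFT 11,
 * MBIGCLSHIFT 12): for an object 2048 bytes past base address c,
 * MBPAGEIDX(c, m) is 8 (the ninth mbuf), CLPAGEIDX(c, m) is 1 (the
 * second 2KB cluster) and BCLPAGEIDX(c, m) is 0 (still inside the
 * first 4KB cluster).
 */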

/*
 * Macros used during mbuf and cluster initialization.
 */
#define	MBUF_INIT_PKTHDR(m) {						\
	(m)->m_pkthdr.rcvif = NULL;					\
	(m)->m_pkthdr.pkt_hdr = NULL;					\
	(m)->m_pkthdr.len = 0;						\
	(m)->m_pkthdr.csum_flags = 0;					\
	(m)->m_pkthdr.csum_data = 0;					\
	(m)->m_pkthdr.vlan_tag = 0;					\
	m_classifier_init(m, 0);					\
	m_tag_init(m, 1);						\
	m_scratch_init(m);						\
	m_redzone_init(m);						\
}

#define	MBUF_INIT(m, pkthdr, type) {					\
	_MCHECK(m);							\
	(m)->m_next = (m)->m_nextpkt = NULL;				\
	(m)->m_len = 0;							\
	(m)->m_type = type;						\
	if ((pkthdr) == 0) {						\
		(m)->m_data = (m)->m_dat;				\
		(m)->m_flags = 0;					\
	} else {							\
		(m)->m_data = (m)->m_pktdat;				\
		(m)->m_flags = M_PKTHDR;				\
		MBUF_INIT_PKTHDR(m);					\
	}								\
}
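
/*
 * For example (illustrative), MBUF_INIT(m, 1, MT_DATA) sets up a
 * packet-header mbuf: m_data is pointed at m_pktdat, M_PKTHDR is set,
 * and the pkthdr fields are cleared via MBUF_INIT_PKTHDR(); with
 * pkthdr == 0 the mbuf is plain and m_data points at m_dat instead.
 */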

#define	MEXT_INIT(m, buf, size, free, arg, rfa, min, ref, pref, flag,	\
    priv, pm) {								\
	(m)->m_data = (m)->m_ext.ext_buf = (buf);			\
	(m)->m_flags |= M_EXT;						\
	m_set_ext((m), (rfa), (free), (arg));				\
	(m)->m_ext.ext_size = (size);					\
	MEXT_MINREF(m) = (min);						\
	MEXT_REF(m) = (ref);						\
	MEXT_PREF(m) = (pref);						\
	MEXT_FLAGS(m) = (flag);						\
	MEXT_PRIV(m) = (priv);						\
	MEXT_PMBUF(m) = (pm);						\
}

#define	MBUF_CL_INIT(m, buf, rfa, ref, flag)	\
	MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, 0,	\
	    ref, 0, flag, 0, NULL)

#define	MBUF_BIGCL_INIT(m, buf, rfa, ref, flag)	\
	MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, 0,	\
	    ref, 0, flag, 0, NULL)

#define	MBUF_16KCL_INIT(m, buf, rfa, ref, flag)	\
	MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, 0,	\
	    ref, 0, flag, 0, NULL)

/*
 * Macro to convert the BSD malloc sleep flag to mcache's.
 */
#define	MSLEEPF(f)	((!((f) & M_DONTWAIT)) ? MCR_SLEEP : MCR_NOSLEEP)
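
/*
 * For example, MSLEEPF(M_WAIT) evaluates to MCR_SLEEP (the mcache layer
 * may block until memory is available), while MSLEEPF(M_DONTWAIT)
 * evaluates to MCR_NOSLEEP (fail the allocation instead of sleeping).
 */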

/*
 * The structure that holds all mbuf class statistics exportable via sysctl.
 * Similar to the mbstat structure, the mb_stat structure is protected by
 * the global mbuf lock.  It contains additional information about the
 * classes that allows for a more accurate view of the state of the
 * allocator.
 */
struct mb_stat *mb_stat;
struct omb_stat *omb_stat;	/* For backwards compatibility */

#define	MB_STAT_SIZE(n) \
	((size_t)(&((mb_stat_t *)0)->mbs_class[n]))
#define	OMB_STAT_SIZE(n) \
	((size_t)(&((struct omb_stat *)0)->mbs_class[n]))
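
/*
 * Both macros compute the number of bytes needed for a structure whose
 * trailing mbs_class[] array holds n entries, by taking the offset of
 * element n from a null base pointer; this is the classic offsetof()
 * idiom for sizing variable-length structures, used below as e.g.
 * MB_STAT_SIZE(NELEM(mbuf_table)).
 */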

/*
 * The legacy structure holding all of the mbuf allocation statistics.
 * The actual statistics used by the kernel are stored in the mbuf_table
 * instead, and are updated atomically while the global mbuf lock is held.
 * They are mirrored in mbstat to support legacy applications (e.g. netstat).
 * Unlike before, the kernel no longer relies on the contents of mbstat for
 * its operations (e.g. cluster expansion) because the structure is exposed
 * to outside and could possibly be modified, therefore making it unsafe.
 * With the exception of the mbstat.m_mtypes array (see below), all of the
 * statistics are updated as they change.
 */
struct mbstat mbstat;

#define	MBSTAT_MTYPES_MAX \
	(sizeof (mbstat.m_mtypes) / sizeof (mbstat.m_mtypes[0]))

/*
 * Allocation statistics related to mbuf types (up to MT_MAX-1) are updated
 * atomically and stored in a per-CPU structure which is lock-free; this is
 * done in order to avoid writing to the global mbstat data structure which
 * would cause false sharing.  During a sysctl request for kern.ipc.mbstat,
 * the statistics across all CPUs will be converged into the mbstat.m_mtypes
 * array and returned to the application.  Any updates for types greater
 * than or equal to MT_MAX would be done atomically to the mbstat; this
 * slows down performance but is okay since the kernel uses only up to
 * MT_MAX-1 while anything beyond that (up to type 255) is considered a
 * corner case.
 */
typedef struct {
	unsigned int	cpu_mtypes[MT_MAX];
} __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE), packed)) mtypes_cpu_t;

typedef struct {
	mtypes_cpu_t	mbs_cpu[1];
} mbuf_mtypes_t;

static mbuf_mtypes_t *mbuf_mtypes;	/* per-CPU statistics */

#define	MBUF_MTYPES_SIZE(n) \
	((size_t)(&((mbuf_mtypes_t *)0)->mbs_cpu[n]))

#define	MTYPES_CPU(p) \
	((mtypes_cpu_t *)(void *)((char *)(p) + MBUF_MTYPES_SIZE(cpu_number())))

#define	mtype_stat_add(type, n) {					\
	if ((unsigned)(type) < MT_MAX) {				\
		mtypes_cpu_t *mbs = MTYPES_CPU(mbuf_mtypes);		\
		atomic_add_32(&mbs->cpu_mtypes[type], n);		\
	} else if ((unsigned)(type) < (unsigned)MBSTAT_MTYPES_MAX) {	\
		atomic_add_16((int16_t *)&mbstat.m_mtypes[type], n);	\
	}								\
}

#define	mtype_stat_sub(t, n)	mtype_stat_add(t, -(n))
#define	mtype_stat_inc(t)	mtype_stat_add(t, 1)
#define	mtype_stat_dec(t)	mtype_stat_sub(t, 1)
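
/*
 * For example (illustrative), an allocation path that hands out an
 * MT_DATA mbuf does mtype_stat_inc(MT_DATA) and the matching free path
 * does mtype_stat_dec(MT_DATA); each update lands in the calling CPU's
 * cpu_mtypes[] slot, and the per-CPU counters are only summed into
 * mbstat.m_mtypes[] by mbuf_mtypes_sync() below.
 */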

static void
mbuf_mtypes_sync(boolean_t locked)
{
	int m, n;
	mtypes_cpu_t mtc;

	if (locked)
		lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	bzero(&mtc, sizeof (mtc));
	for (m = 0; m < ncpu; m++) {
		mtypes_cpu_t *scp = &mbuf_mtypes->mbs_cpu[m];
		mtypes_cpu_t temp;

		bcopy(&scp->cpu_mtypes, &temp.cpu_mtypes,
		    sizeof (temp.cpu_mtypes));

		for (n = 0; n < MT_MAX; n++)
			mtc.cpu_mtypes[n] += temp.cpu_mtypes[n];
	}
	if (!locked)
		lck_mtx_lock(mbuf_mlock);
	for (n = 0; n < MT_MAX; n++)
		mbstat.m_mtypes[n] = mtc.cpu_mtypes[n];
	if (!locked)
		lck_mtx_unlock(mbuf_mlock);
}

static int
mbstat_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	mbuf_mtypes_sync(FALSE);

	return (SYSCTL_OUT(req, &mbstat, sizeof (mbstat)));
}

static void
mbuf_stat_sync(void)
{
	mb_class_stat_t *sp;
	mcache_cpu_t *ccp;
	mcache_t *cp;
	int k, m, bktsize;

	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	for (k = 0; k < NELEM(mbuf_table); k++) {
		cp = m_cache(k);
		ccp = &cp->mc_cpu[0];
		bktsize = ccp->cc_bktsize;
		sp = mbuf_table[k].mtbl_stats;

		if (cp->mc_flags & MCF_NOCPUCACHE)
			sp->mbcl_mc_state = MCS_DISABLED;
		else if (cp->mc_purge_cnt > 0)
			sp->mbcl_mc_state = MCS_PURGING;
		else if (bktsize == 0)
			sp->mbcl_mc_state = MCS_OFFLINE;
		else
			sp->mbcl_mc_state = MCS_ONLINE;

		sp->mbcl_mc_cached = 0;
		for (m = 0; m < ncpu; m++) {
			ccp = &cp->mc_cpu[m];
			if (ccp->cc_objs > 0)
				sp->mbcl_mc_cached += ccp->cc_objs;
			if (ccp->cc_pobjs > 0)
				sp->mbcl_mc_cached += ccp->cc_pobjs;
		}
		sp->mbcl_mc_cached += (cp->mc_full.bl_total * bktsize);
		sp->mbcl_active = sp->mbcl_total - sp->mbcl_mc_cached -
		    sp->mbcl_infree;

		sp->mbcl_mc_waiter_cnt = cp->mc_waiter_cnt;
		sp->mbcl_mc_wretry_cnt = cp->mc_wretry_cnt;
		sp->mbcl_mc_nwretry_cnt = cp->mc_nwretry_cnt;

		/* Calculate total count specific to each class */
		sp->mbcl_ctotal = sp->mbcl_total;
		switch (m_class(k)) {
		case MC_MBUF:
			/* Deduct mbufs used in composite caches */
			sp->mbcl_ctotal -= (m_total(MC_MBUF_CL) +
			    m_total(MC_MBUF_BIGCL));
			break;

		case MC_CL:
			/* Deduct clusters used in composite cache */
			sp->mbcl_ctotal -= m_total(MC_MBUF_CL);
			break;

		case MC_BIGCL:
			/* Deduct clusters used in composite cache */
			sp->mbcl_ctotal -= m_total(MC_MBUF_BIGCL);
			break;

		case MC_16KCL:
			/* Deduct clusters used in composite cache */
			sp->mbcl_ctotal -= m_total(MC_MBUF_16KCL);
			break;

		default:
			break;
		}
	}
}

static int
mb_stat_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	void *statp;
	int k, statsz, proc64 = proc_is64bit(req->p);

	lck_mtx_lock(mbuf_mlock);
	mbuf_stat_sync();

	if (!proc64) {
		struct omb_class_stat *oc;
		struct mb_class_stat *c;

		omb_stat->mbs_cnt = mb_stat->mbs_cnt;
		oc = &omb_stat->mbs_class[0];
		c = &mb_stat->mbs_class[0];
		for (k = 0; k < omb_stat->mbs_cnt; k++, oc++, c++) {
			(void) snprintf(oc->mbcl_cname, sizeof (oc->mbcl_cname),
			    "%s", c->mbcl_cname);
			oc->mbcl_size = c->mbcl_size;
			oc->mbcl_total = c->mbcl_total;
			oc->mbcl_active = c->mbcl_active;
			oc->mbcl_infree = c->mbcl_infree;
			oc->mbcl_slab_cnt = c->mbcl_slab_cnt;
			oc->mbcl_alloc_cnt = c->mbcl_alloc_cnt;
			oc->mbcl_free_cnt = c->mbcl_free_cnt;
			oc->mbcl_notified = c->mbcl_notified;
			oc->mbcl_purge_cnt = c->mbcl_purge_cnt;
			oc->mbcl_fail_cnt = c->mbcl_fail_cnt;
			oc->mbcl_ctotal = c->mbcl_ctotal;
			oc->mbcl_release_cnt = c->mbcl_release_cnt;
			oc->mbcl_mc_state = c->mbcl_mc_state;
			oc->mbcl_mc_cached = c->mbcl_mc_cached;
			oc->mbcl_mc_waiter_cnt = c->mbcl_mc_waiter_cnt;
			oc->mbcl_mc_wretry_cnt = c->mbcl_mc_wretry_cnt;
			oc->mbcl_mc_nwretry_cnt = c->mbcl_mc_nwretry_cnt;
		}
		statp = omb_stat;
		statsz = OMB_STAT_SIZE(NELEM(mbuf_table));
	} else {
		statp = mb_stat;
		statsz = MB_STAT_SIZE(NELEM(mbuf_table));
	}

	lck_mtx_unlock(mbuf_mlock);

	return (SYSCTL_OUT(req, statp, statsz));
}

static int
mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int i;

	/* Ensure leak tracing turned on */
	if (!mclfindleak || !mclexpleak)
		return (ENXIO);

	lck_mtx_lock(mleak_lock);
	mleak_update_stats();
	i = SYSCTL_OUT(req, mleak_stat, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES));
	lck_mtx_unlock(mleak_lock);

	return (i);
}

static int
mleak_table_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int i = 0;

	/* Ensure leak tracing turned on */
	if (!mclfindleak || !mclexpleak)
		return (ENXIO);

	lck_mtx_lock(mleak_lock);
	i = SYSCTL_OUT(req, &mleak_table, sizeof (mleak_table));
	lck_mtx_unlock(mleak_lock);

	return (i);
}

static inline void
m_incref(struct mbuf *m)
{
	UInt16 old, new;
	volatile UInt16 *addr = (volatile UInt16 *)&MEXT_REF(m);

	do {
		old = *addr;
		new = old + 1;
		ASSERT(new != 0);
	} while (!OSCompareAndSwap16(old, new, addr));

	/*
	 * If cluster is shared, mark it with (sticky) EXTF_READONLY;
	 * we don't clear the flag when the refcount goes back to the
	 * minimum, to simplify code calling m_mclhasreference().
	 */
	if (new > (MEXT_MINREF(m) + 1) && !(MEXT_FLAGS(m) & EXTF_READONLY))
		(void) OSBitOrAtomic16(EXTF_READONLY, &MEXT_FLAGS(m));
}

static inline u_int16_t
m_decref(struct mbuf *m)
{
	UInt16 old, new;
	volatile UInt16 *addr = (volatile UInt16 *)&MEXT_REF(m);

	do {
		old = *addr;
		new = old - 1;
		ASSERT(old != 0);
	} while (!OSCompareAndSwap16(old, new, addr));

	return (new);
}
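
/*
 * Both routines above are lock-free: the compare-and-swap loop re-reads
 * the 16-bit refcount and retries until no other CPU has raced in
 * between, so callers may bump or drop MEXT_REF() without holding
 * mbuf_mlock.  m_decref() returns the post-decrement count so the
 * caller can tell (e.g. against MEXT_MINREF()) whether it dropped the
 * last reference and must free the cluster.
 */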
1223 | ||
2d21ac55 A |
1224 | static void |
1225 | mbuf_table_init(void) | |
1c79356b | 1226 | { |
6d2010ae | 1227 | unsigned int b, c, s; |
3e170ce0 | 1228 | int m, config_mbuf_jumbo = 0; |
91447636 | 1229 | |
b0d623f7 A |
1230 | MALLOC(omb_stat, struct omb_stat *, OMB_STAT_SIZE(NELEM(mbuf_table)), |
1231 | M_TEMP, M_WAITOK | M_ZERO); | |
1232 | VERIFY(omb_stat != NULL); | |
1233 | ||
2d21ac55 A |
1234 | MALLOC(mb_stat, mb_stat_t *, MB_STAT_SIZE(NELEM(mbuf_table)), |
1235 | M_TEMP, M_WAITOK | M_ZERO); | |
1236 | VERIFY(mb_stat != NULL); | |
1c79356b | 1237 | |
2d21ac55 A |
1238 | mb_stat->mbs_cnt = NELEM(mbuf_table); |
1239 | for (m = 0; m < NELEM(mbuf_table); m++) | |
1240 | mbuf_table[m].mtbl_stats = &mb_stat->mbs_class[m]; | |
1c79356b | 1241 | |
2d21ac55 | 1242 | #if CONFIG_MBUF_JUMBO |
3e170ce0 | 1243 | config_mbuf_jumbo = 1; |
2d21ac55 | 1244 | #endif /* CONFIG_MBUF_JUMBO */ |
9bccf70c | 1245 | |
3e170ce0 A |
1246 | if (config_mbuf_jumbo == 1 || PAGE_SIZE == M16KCLBYTES) { |
1247 | /* | |
1248 | * Set aside 1/3 of the mbuf cluster map for jumbo | |
1249 | * clusters; we do this only on platforms where the jumbo | |
1250 | * cluster pool is enabled. | |
1251 | */ | |
1252 | njcl = nmbclusters / 3; | |
1253 | njclbytes = M16KCLBYTES; | |
1254 | } | |
1255 | ||
2d21ac55 | 1256 | /* |
6d2010ae A |
1257 | * nclusters holds both the 2KB and 4KB pools, so ensure it's |
1258 | * a multiple of 4KB clusters. | |
2d21ac55 | 1259 | */ |
3e170ce0 | 1260 | nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG); |
2d21ac55 A |
1261 | if (njcl > 0) { |
1262 | /* | |
6d2010ae A |
1263 | * Each jumbo cluster takes 8 2KB clusters, so make |
1264 | * sure that the pool size is evenly divisible by 8; | |
1265 | * njcl is in 2KB units, hence treated as such. | |
2d21ac55 | 1266 | */ |
3e170ce0 | 1267 | njcl = P2ROUNDDOWN(nmbclusters - nclusters, NCLPJCL); |
1c79356b | 1268 | |
6d2010ae | 1269 | /* Update nclusters with the rounded-down value of njcl */ |
3e170ce0 | 1270 | nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG); |
9bccf70c | 1271 | } |
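/*
 * Worked example (illustrative figures; assumes 4KB pages, so
 * NCLPG == 2, and NCLPJCL == 8 since a 16KB cluster spans eight
 * 2KB units): with nmbclusters = 32768, the first cut gives
 * njcl = 10922.  Rounding nmbclusters - njcl down to a NCLPG
 * multiple gives nclusters = 21846; re-deriving njcl as
 * P2ROUNDDOWN(32768 - 21846, 8) yields 10920, and the final
 * nclusters becomes 21848.  Note that 21848 + 10920 == 32768,
 * so no 2KB units are lost to the alignment.
 */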
2d21ac55 A |
1272 | |
1273 | /* | |
3e170ce0 A |
1274 | * njcl is valid only on platforms with 16KB jumbo clusters or |
1275 | * with 16KB pages, where it is configured to 1/3 of the pool | |
1276 | * size. On these platforms, the remainder is used for 2KB | |
1277 | * and 4KB clusters. On platforms without 16KB jumbo clusters, | |
1278 | * the entire pool is used for both 2KB and 4KB clusters. A 4KB | |
1279 | * cluster can be split either into 16 mbufs or into two 2KB | |
1280 | * clusters. | |
6d2010ae A |
1281 | * |
1282 | * +---+---+------------ ... -----------+------- ... -------+ | |
1283 | * | c | b | s | njcl | | |
1284 | * +---+---+------------ ... -----------+------- ... -------+ | |
1285 | * | |
1286 | * 1/32nd of the shared region is reserved for pure 2KB and 4KB | |
1287 | * clusters (1/64th each.) | |
1288 | */ | |
3e170ce0 A |
1289 | c = P2ROUNDDOWN((nclusters >> 6), NCLPG); /* in 2KB unit */ |
1290 | b = P2ROUNDDOWN((nclusters >> (6 + NCLPBGSHIFT)), NBCLPG); /* in 4KB unit */ | |
6d2010ae A |
1291 | s = nclusters - (c + (b << NCLPBGSHIFT)); /* in 2KB unit */ |
1292 | ||
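/*
 * Worked example (illustrative, assuming 4KB pages: NCLPG == 2,
 * NCLPBGSHIFT == 1, NBCLPG == 1): with nclusters = 32768 (64MB),
 * c = P2ROUNDDOWN(32768 >> 6, 2) = 512 2KB units (1MB, 1/64th),
 * b = P2ROUNDDOWN(32768 >> 7, 1) = 256 4KB units (1MB, 1/64th),
 * s = 32768 - (512 + 512) = 31744 2KB units (62MB, 31/32nds).
 */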
1293 | /* | |
1294 | * 1/64th (c) is reserved for 2KB clusters. | |
2d21ac55 | 1295 | */ |
6d2010ae A |
1296 | m_minlimit(MC_CL) = c; |
1297 | m_maxlimit(MC_CL) = s + c; /* in 2KB unit */ | |
2d21ac55 A |
1298 | m_maxsize(MC_CL) = m_size(MC_CL) = MCLBYTES; |
1299 | (void) snprintf(m_cname(MC_CL), MAX_MBUF_CNAME, "cl"); | |
1300 | ||
1301 | /* | |
6d2010ae A |
1302 | * Another 1/64th (b) of the map is reserved for 4KB clusters. |
1303 | * It cannot be turned into 2KB clusters or mbufs. | |
2d21ac55 | 1304 | */ |
6d2010ae A |
1305 | m_minlimit(MC_BIGCL) = b; |
1306 | m_maxlimit(MC_BIGCL) = (s >> NCLPBGSHIFT) + b; /* in 4KB unit */ | |
1307 | m_maxsize(MC_BIGCL) = m_size(MC_BIGCL) = MBIGCLBYTES; | |
1308 | (void) snprintf(m_cname(MC_BIGCL), MAX_MBUF_CNAME, "bigcl"); | |
2d21ac55 A |
1309 | |
1310 | /* | |
6d2010ae | 1311 | * The remaining 31/32nds (s) are all-purpose (mbufs, 2KB, or 4KB) |
2d21ac55 | 1312 | */ |
6d2010ae A |
1313 | m_minlimit(MC_MBUF) = 0; |
1314 | m_maxlimit(MC_MBUF) = (s << NMBPCLSHIFT); /* in mbuf unit */ | |
1315 | m_maxsize(MC_MBUF) = m_size(MC_MBUF) = MSIZE; | |
1316 | (void) snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf"); | |
2d21ac55 A |
1317 | |
1318 | /* | |
1319 | * Set limits for the composite classes. | |
1320 | */ | |
1321 | m_minlimit(MC_MBUF_CL) = 0; | |
6d2010ae | 1322 | m_maxlimit(MC_MBUF_CL) = m_maxlimit(MC_CL); |
2d21ac55 A |
1323 | m_maxsize(MC_MBUF_CL) = MCLBYTES; |
1324 | m_size(MC_MBUF_CL) = m_size(MC_MBUF) + m_size(MC_CL); | |
1325 | (void) snprintf(m_cname(MC_MBUF_CL), MAX_MBUF_CNAME, "mbuf_cl"); | |
1326 | ||
1327 | m_minlimit(MC_MBUF_BIGCL) = 0; | |
1328 | m_maxlimit(MC_MBUF_BIGCL) = m_maxlimit(MC_BIGCL); | |
6d2010ae | 1329 | m_maxsize(MC_MBUF_BIGCL) = MBIGCLBYTES; |
2d21ac55 A |
1330 | m_size(MC_MBUF_BIGCL) = m_size(MC_MBUF) + m_size(MC_BIGCL); |
1331 | (void) snprintf(m_cname(MC_MBUF_BIGCL), MAX_MBUF_CNAME, "mbuf_bigcl"); | |
1332 | ||
1333 | /* | |
1334 | * And for jumbo classes. | |
1335 | */ | |
1336 | m_minlimit(MC_16KCL) = 0; | |
6d2010ae | 1337 | m_maxlimit(MC_16KCL) = (njcl >> NCLPJCLSHIFT); /* in 16KB unit */ |
2d21ac55 A |
1338 | m_maxsize(MC_16KCL) = m_size(MC_16KCL) = M16KCLBYTES; |
1339 | (void) snprintf(m_cname(MC_16KCL), MAX_MBUF_CNAME, "16kcl"); | |
1340 | ||
1341 | m_minlimit(MC_MBUF_16KCL) = 0; | |
1342 | m_maxlimit(MC_MBUF_16KCL) = m_maxlimit(MC_16KCL); | |
1343 | m_maxsize(MC_MBUF_16KCL) = M16KCLBYTES; | |
1344 | m_size(MC_MBUF_16KCL) = m_size(MC_MBUF) + m_size(MC_16KCL); | |
1345 | (void) snprintf(m_cname(MC_MBUF_16KCL), MAX_MBUF_CNAME, "mbuf_16kcl"); | |
1346 | ||
1347 | /* | |
1348 | * Initialize the legacy mbstat structure. | |
1349 | */ | |
1350 | bzero(&mbstat, sizeof (mbstat)); | |
1351 | mbstat.m_msize = m_maxsize(MC_MBUF); | |
1352 | mbstat.m_mclbytes = m_maxsize(MC_CL); | |
1353 | mbstat.m_minclsize = MINCLSIZE; | |
1354 | mbstat.m_mlen = MLEN; | |
1355 | mbstat.m_mhlen = MHLEN; | |
1356 | mbstat.m_bigmclbytes = m_maxsize(MC_BIGCL); | |
1357 | } | |
1358 | ||
b0d623f7 A |
1359 | #if defined(__LP64__) |
1360 | typedef struct ncl_tbl { | |
1361 | uint64_t nt_maxmem; /* memory (sane) size */ | |
1362 | uint32_t nt_mbpool; /* mbuf pool size */ | |
1363 | } ncl_tbl_t; | |
1364 | ||
1365 | /* Non-server */ | |
1366 | static ncl_tbl_t ncl_table[] = { | |
316670eb | 1367 | { (1ULL << GBSHIFT) /* 1 GB */, (64 << MBSHIFT) /* 64 MB */ }, |
b0d623f7 A |
1368 | { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (96 << MBSHIFT) /* 96 MB */ }, |
1369 | { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (128 << MBSHIFT) /* 128 MB */ }, | |
1370 | { 0, 0 } | |
1371 | }; | |
1372 | ||
1373 | /* Server */ | |
1374 | static ncl_tbl_t ncl_table_srv[] = { | |
316670eb | 1375 | { (1ULL << GBSHIFT) /* 1 GB */, (96 << MBSHIFT) /* 96 MB */ }, |
b0d623f7 A |
1376 | { (1ULL << (GBSHIFT + 2)) /* 4 GB */, (128 << MBSHIFT) /* 128 MB */ }, |
1377 | { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (160 << MBSHIFT) /* 160 MB */ }, | |
1378 | { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (192 << MBSHIFT) /* 192 MB */ }, | |
1379 | { (1ULL << (GBSHIFT + 5)) /* 32 GB */, (256 << MBSHIFT) /* 256 MB */ }, | |
1380 | { (1ULL << (GBSHIFT + 6)) /* 64 GB */, (384 << MBSHIFT) /* 384 MB */ }, | |
1381 | { 0, 0 } | |
1382 | }; | |
1383 | #endif /* __LP64__ */ | |
1384 | ||
1385 | __private_extern__ unsigned int | |
6d2010ae | 1386 | mbuf_default_ncl(int server, uint64_t mem) |
b0d623f7 A |
1387 | { |
1388 | #if !defined(__LP64__) | |
6d2010ae | 1389 | #pragma unused(server) |
b0d623f7 A |
1390 | unsigned int n; |
1391 | /* | |
1392 | * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM). | |
1393 | */ | |
6d2010ae A |
1394 | if ((n = ((mem / 16) / MCLBYTES)) > 32768) |
1395 | n = 32768; | |
b0d623f7 A |
1396 | #else |
1397 | unsigned int n, i; | |
6d2010ae | 1398 | ncl_tbl_t *tbl = (server ? ncl_table_srv : ncl_table); |
b0d623f7 A |
1399 | /* |
1400 | * 64-bit kernel (mbuf pool size based on table). | |
1401 | */ | |
1402 | n = tbl[0].nt_mbpool; | |
1403 | for (i = 0; tbl[i].nt_mbpool != 0; i++) { | |
1404 | if (mem < tbl[i].nt_maxmem) | |
1405 | break; | |
1406 | n = tbl[i].nt_mbpool; | |
1407 | } | |
1408 | n >>= MCLSHIFT; | |
1409 | #endif /* !__LP64__ */ | |
1410 | return (n); | |
1411 | } | |
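/*
 * Illustrative lookup (hypothetical machine): a non-server 64-bit
 * system with mem = 4GB walks ncl_table; 4GB >= 1GB selects 64MB,
 * and the scan stops at the 8GB entry.  The result is converted to
 * cluster units: (64 << MBSHIFT) >> MCLSHIFT = 32768 2KB clusters.
 */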
1412 | ||
2d21ac55 A |
1413 | __private_extern__ void |
1414 | mbinit(void) | |
1415 | { | |
1416 | unsigned int m; | |
6d2010ae | 1417 | unsigned int initmcl = 0; |
2d21ac55 | 1418 | void *buf; |
b0d623f7 | 1419 | thread_t thread = THREAD_NULL; |
2d21ac55 | 1420 | |
39236c6e A |
1421 | microuptime(&mb_start); |
1422 | ||
316670eb A |
1423 | /* |
1424 | * These MBUF_ values must be equal to their private counterparts. | |
1425 | */ | |
1426 | _CASSERT(MBUF_EXT == M_EXT); | |
1427 | _CASSERT(MBUF_PKTHDR == M_PKTHDR); | |
1428 | _CASSERT(MBUF_EOR == M_EOR); | |
1429 | _CASSERT(MBUF_LOOP == M_LOOP); | |
1430 | _CASSERT(MBUF_BCAST == M_BCAST); | |
1431 | _CASSERT(MBUF_MCAST == M_MCAST); | |
1432 | _CASSERT(MBUF_FRAG == M_FRAG); | |
1433 | _CASSERT(MBUF_FIRSTFRAG == M_FIRSTFRAG); | |
1434 | _CASSERT(MBUF_LASTFRAG == M_LASTFRAG); | |
1435 | _CASSERT(MBUF_PROMISC == M_PROMISC); | |
1436 | _CASSERT(MBUF_HASFCS == M_HASFCS); | |
1437 | ||
1438 | _CASSERT(MBUF_TYPE_FREE == MT_FREE); | |
1439 | _CASSERT(MBUF_TYPE_DATA == MT_DATA); | |
1440 | _CASSERT(MBUF_TYPE_HEADER == MT_HEADER); | |
1441 | _CASSERT(MBUF_TYPE_SOCKET == MT_SOCKET); | |
1442 | _CASSERT(MBUF_TYPE_PCB == MT_PCB); | |
1443 | _CASSERT(MBUF_TYPE_RTABLE == MT_RTABLE); | |
1444 | _CASSERT(MBUF_TYPE_HTABLE == MT_HTABLE); | |
1445 | _CASSERT(MBUF_TYPE_ATABLE == MT_ATABLE); | |
1446 | _CASSERT(MBUF_TYPE_SONAME == MT_SONAME); | |
1447 | _CASSERT(MBUF_TYPE_SOOPTS == MT_SOOPTS); | |
1448 | _CASSERT(MBUF_TYPE_FTABLE == MT_FTABLE); | |
1449 | _CASSERT(MBUF_TYPE_RIGHTS == MT_RIGHTS); | |
1450 | _CASSERT(MBUF_TYPE_IFADDR == MT_IFADDR); | |
1451 | _CASSERT(MBUF_TYPE_CONTROL == MT_CONTROL); | |
1452 | _CASSERT(MBUF_TYPE_OOBDATA == MT_OOBDATA); | |
1453 | ||
1454 | _CASSERT(MBUF_TSO_IPV4 == CSUM_TSO_IPV4); | |
1455 | _CASSERT(MBUF_TSO_IPV6 == CSUM_TSO_IPV6); | |
39236c6e | 1456 | _CASSERT(MBUF_CSUM_REQ_SUM16 == CSUM_PARTIAL); |
316670eb A |
1457 | _CASSERT(MBUF_CSUM_TCP_SUM16 == MBUF_CSUM_REQ_SUM16); |
1458 | _CASSERT(MBUF_CSUM_REQ_IP == CSUM_IP); | |
1459 | _CASSERT(MBUF_CSUM_REQ_TCP == CSUM_TCP); | |
1460 | _CASSERT(MBUF_CSUM_REQ_UDP == CSUM_UDP); | |
1461 | _CASSERT(MBUF_CSUM_REQ_TCPIPV6 == CSUM_TCPIPV6); | |
1462 | _CASSERT(MBUF_CSUM_REQ_UDPIPV6 == CSUM_UDPIPV6); | |
1463 | _CASSERT(MBUF_CSUM_DID_IP == CSUM_IP_CHECKED); | |
1464 | _CASSERT(MBUF_CSUM_IP_GOOD == CSUM_IP_VALID); | |
1465 | _CASSERT(MBUF_CSUM_DID_DATA == CSUM_DATA_VALID); | |
1466 | _CASSERT(MBUF_CSUM_PSEUDO_HDR == CSUM_PSEUDO_HDR); | |
1467 | ||
1468 | _CASSERT(MBUF_WAITOK == M_WAIT); | |
1469 | _CASSERT(MBUF_DONTWAIT == M_DONTWAIT); | |
1470 | _CASSERT(MBUF_COPYALL == M_COPYALL); | |
1471 | ||
316670eb A |
1472 | _CASSERT(MBUF_SC2TC(MBUF_SC_BK_SYS) == MBUF_TC_BK); |
1473 | _CASSERT(MBUF_SC2TC(MBUF_SC_BK) == MBUF_TC_BK); | |
1474 | _CASSERT(MBUF_SC2TC(MBUF_SC_BE) == MBUF_TC_BE); | |
1475 | _CASSERT(MBUF_SC2TC(MBUF_SC_RD) == MBUF_TC_BE); | |
1476 | _CASSERT(MBUF_SC2TC(MBUF_SC_OAM) == MBUF_TC_BE); | |
1477 | _CASSERT(MBUF_SC2TC(MBUF_SC_AV) == MBUF_TC_VI); | |
1478 | _CASSERT(MBUF_SC2TC(MBUF_SC_RV) == MBUF_TC_VI); | |
1479 | _CASSERT(MBUF_SC2TC(MBUF_SC_VI) == MBUF_TC_VI); | |
1480 | _CASSERT(MBUF_SC2TC(MBUF_SC_VO) == MBUF_TC_VO); | |
1481 | _CASSERT(MBUF_SC2TC(MBUF_SC_CTL) == MBUF_TC_VO); | |
1482 | ||
1483 | _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BK) == SCVAL_BK); | |
1484 | _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BE) == SCVAL_BE); | |
1485 | _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VI) == SCVAL_VI); | |
1486 | _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VO) == SCVAL_VO); | |
1487 | ||
39236c6e A |
1488 | /* Module specific scratch space (32-bit alignment requirement) */ |
1489 | _CASSERT(!(offsetof(struct mbuf, m_pkthdr.pkt_mpriv) % | |
1490 | sizeof (uint32_t))); | |
1491 | ||
1492 | /* Initialize random red zone cookie value */ | |
1493 | _CASSERT(sizeof (mb_redzone_cookie) == | |
1494 | sizeof (((struct pkthdr *)0)->redzone)); | |
1495 | read_random(&mb_redzone_cookie, sizeof (mb_redzone_cookie)); | |
813fb2f6 A |
1496 | read_random(&mb_obscure_extref, sizeof (mb_obscure_extref)); |
1497 | read_random(&mb_obscure_extfree, sizeof (mb_obscure_extfree)); | |
1498 | mb_obscure_extref |= 0x3; | |
1499 | mb_obscure_extfree |= 0x3; | |
39236c6e A |
1500 | |
1501 | /* Make sure we don't save more than we should */ | |
1502 | _CASSERT(MCA_SAVED_MBUF_SIZE <= sizeof (struct mbuf)); | |
1503 | ||
2d21ac55 A |
1504 | if (nmbclusters == 0) |
1505 | nmbclusters = NMBCLUSTERS; | |
1506 | ||
6d2010ae A |
1507 | /* This should be a sane (at least even) value by now */ |
1508 | VERIFY(nmbclusters != 0 && !(nmbclusters & 0x1)); | |
1509 | ||
2d21ac55 A |
1510 | /* Setup the mbuf table */ |
1511 | mbuf_table_init(); | |
1512 | ||
1513 | /* Global lock for common layer */ | |
1514 | mbuf_mlock_grp_attr = lck_grp_attr_alloc_init(); | |
1515 | mbuf_mlock_grp = lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr); | |
1516 | mbuf_mlock_attr = lck_attr_alloc_init(); | |
316670eb | 1517 | lck_mtx_init(mbuf_mlock, mbuf_mlock_grp, mbuf_mlock_attr); |
2d21ac55 | 1518 | |
6d2010ae A |
1519 | /* |
1520 | * Allocate cluster slabs table: | |
1521 | * | |
1522 | * maxslabgrp = (N * 2048) / (1024 * 1024) | |
1523 | * | |
1524 | * Where N is nmbclusters rounded up to the nearest 512. This yields | |
1525 | * mcl_slabg_t units, each one representing 1 MB of memory. | |
1526 | */ | |
1527 | maxslabgrp = | |
3e170ce0 | 1528 | (P2ROUNDUP(nmbclusters, (MBSIZE >> MCLSHIFT)) << MCLSHIFT) >> MBSHIFT; |
2d21ac55 A |
1529 | MALLOC(slabstbl, mcl_slabg_t **, maxslabgrp * sizeof (mcl_slabg_t *), |
1530 | M_TEMP, M_WAITOK | M_ZERO); | |
1531 | VERIFY(slabstbl != NULL); | |
1532 | ||
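/*
 * Worked example (illustrative): with nmbclusters = 32768 (already a
 * multiple of 512), maxslabgrp = (32768 << MCLSHIFT) >> MBSHIFT =
 * 64MB >> 20 = 64 slab groups, i.e. one mcl_slabg_t per MB of the
 * cluster map.
 */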
6d2010ae A |
1533 | /* |
1534 | * Allocate audit structures, if needed: | |
1535 | * | |
3e170ce0 | 1536 | * maxclaudit = (maxslabgrp * 1024 * 1024) / PAGE_SIZE |
6d2010ae A |
1537 | * |
1538 | * This yields mcl_audit_t units, each one representing a page. | |
1539 | */ | |
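/*
 * Continuing the example above (maxslabgrp = 64, 4KB pages):
 * maxclaudit = (64 << MBSHIFT) >> PAGE_SHIFT = 64MB / 4KB = 16384
 * mcl_audit_t entries, one for each page of the cluster map.
 */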
593a1d5f | 1540 | PE_parse_boot_argn("mbuf_debug", &mbuf_debug, sizeof (mbuf_debug)); |
2d21ac55 | 1541 | mbuf_debug |= mcache_getflags(); |
6d2010ae | 1542 | if (mbuf_debug & MCF_DEBUG) { |
3e170ce0 A |
1543 | int l; |
1544 | mcl_audit_t *mclad; | |
1545 | maxclaudit = ((maxslabgrp << MBSHIFT) >> PAGE_SHIFT); | |
6d2010ae A |
1546 | MALLOC(mclaudit, mcl_audit_t *, maxclaudit * sizeof (*mclaudit), |
1547 | M_TEMP, M_WAITOK | M_ZERO); | |
2d21ac55 | 1548 | VERIFY(mclaudit != NULL); |
3e170ce0 A |
1549 | for (l = 0, mclad = mclaudit; l < maxclaudit; l++) { |
1550 | MALLOC(mclad[l].cl_audit, mcache_audit_t **, | |
1551 | NMBPG * sizeof(mcache_audit_t *), | |
1552 | M_TEMP, M_WAITOK | M_ZERO); | |
1553 | VERIFY(mclad[l].cl_audit != NULL); | |
1554 | } | |
2d21ac55 A |
1555 | |
1556 | mcl_audit_con_cache = mcache_create("mcl_audit_contents", | |
39236c6e | 1557 | AUDIT_CONTENTS_SIZE, sizeof (u_int64_t), 0, MCR_SLEEP); |
2d21ac55 A |
1558 | VERIFY(mcl_audit_con_cache != NULL); |
1559 | } | |
6d2010ae A |
1560 | mclverify = (mbuf_debug & MCF_VERIFY); |
1561 | mcltrace = (mbuf_debug & MCF_TRACE); | |
1562 | mclfindleak = !(mbuf_debug & MCF_NOLEAKLOG); | |
316670eb | 1563 | mclexpleak = mclfindleak && (mbuf_debug & MCF_EXPLEAKLOG); |
6d2010ae A |
1564 | |
1565 | /* Enable mbuf leak logging, with a lock to protect the tables */ | |
1566 | ||
1567 | mleak_lock_grp_attr = lck_grp_attr_alloc_init(); | |
1568 | mleak_lock_grp = lck_grp_alloc_init("mleak_lock", mleak_lock_grp_attr); | |
1569 | mleak_lock_attr = lck_attr_alloc_init(); | |
316670eb | 1570 | lck_mtx_init(mleak_lock, mleak_lock_grp, mleak_lock_attr); |
6d2010ae A |
1571 | |
1572 | mleak_activate(); | |
2d21ac55 A |
1573 | |
1574 | /* Calculate the number of pages assigned to the cluster pool */ | |
3e170ce0 | 1575 | mcl_pages = (nmbclusters << MCLSHIFT) / PAGE_SIZE; |
b0d623f7 A |
1576 | MALLOC(mcl_paddr, ppnum_t *, mcl_pages * sizeof (ppnum_t), |
1577 | M_TEMP, M_WAITOK); | |
2d21ac55 A |
1578 | VERIFY(mcl_paddr != NULL); |
1579 | ||
1580 | /* Register with the I/O Bus mapper */ | |
1581 | mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages); | |
b0d623f7 | 1582 | bzero((char *)mcl_paddr, mcl_pages * sizeof (ppnum_t)); |
2d21ac55 | 1583 | |
3e170ce0 A |
1584 | embutl = (mbutl + (nmbclusters * MCLBYTES)); |
1585 | VERIFY(((embutl - mbutl) % MBIGCLBYTES) == 0); | |
2d21ac55 | 1586 | |
6d2010ae | 1587 | /* Prime up the freelist */ |
593a1d5f | 1588 | PE_parse_boot_argn("initmcl", &initmcl, sizeof (initmcl)); |
6d2010ae A |
1589 | if (initmcl != 0) { |
1590 | initmcl >>= NCLPBGSHIFT; /* become a 4K unit */ | |
1591 | if (initmcl > m_maxlimit(MC_BIGCL)) | |
1592 | initmcl = m_maxlimit(MC_BIGCL); | |
1593 | } | |
1594 | if (initmcl < m_minlimit(MC_BIGCL)) | |
1595 | initmcl = m_minlimit(MC_BIGCL); | |
2d21ac55 A |
1596 | |
1597 | lck_mtx_lock(mbuf_mlock); | |
1598 | ||
6d2010ae A |
1599 | /* |
1600 | * For classes with non-zero minimum limits, populate their freelists | |
1601 | * so that m_total(class) is at least m_minlimit(class). | |
1602 | */ | |
1603 | VERIFY(m_total(MC_BIGCL) == 0 && m_minlimit(MC_BIGCL) != 0); | |
1604 | freelist_populate(m_class(MC_BIGCL), initmcl, M_WAIT); | |
1605 | VERIFY(m_total(MC_BIGCL) >= m_minlimit(MC_BIGCL)); | |
1606 | freelist_init(m_class(MC_CL)); | |
1607 | ||
1608 | for (m = 0; m < NELEM(mbuf_table); m++) { | |
1609 | /* Make sure we didn't miss any */ | |
1610 | VERIFY(m_minlimit(m_class(m)) == 0 || | |
1611 | m_total(m_class(m)) >= m_minlimit(m_class(m))); | |
fe8ab488 A |
1612 | |
1613 | /* populate the initial sizes and report from there on */ | |
1614 | m_peak(m_class(m)) = m_total(m_class(m)); | |
6d2010ae | 1615 | } |
fe8ab488 | 1616 | mb_peak_newreport = FALSE; |
2d21ac55 A |
1617 | |
1618 | lck_mtx_unlock(mbuf_mlock); | |
1619 | ||
6d2010ae A |
1620 | (void) kernel_thread_start((thread_continue_t)mbuf_worker_thread_init, |
1621 | NULL, &thread); | |
b0d623f7 | 1622 | thread_deallocate(thread); |
2d21ac55 A |
1623 | |
1624 | ref_cache = mcache_create("mext_ref", sizeof (struct ext_ref), | |
1625 | 0, 0, MCR_SLEEP); | |
1626 | ||
1627 | /* Create the cache for each class */ | |
1628 | for (m = 0; m < NELEM(mbuf_table); m++) { | |
6d2010ae | 1629 | void *allocfunc, *freefunc, *auditfunc, *logfunc; |
2d21ac55 A |
1630 | u_int32_t flags; |
1631 | ||
1632 | flags = mbuf_debug; | |
1633 | if (m_class(m) == MC_MBUF_CL || m_class(m) == MC_MBUF_BIGCL || | |
1634 | m_class(m) == MC_MBUF_16KCL) { | |
1635 | allocfunc = mbuf_cslab_alloc; | |
1636 | freefunc = mbuf_cslab_free; | |
1637 | auditfunc = mbuf_cslab_audit; | |
6d2010ae | 1638 | logfunc = mleak_logger; |
2d21ac55 A |
1639 | } else { |
1640 | allocfunc = mbuf_slab_alloc; | |
1641 | freefunc = mbuf_slab_free; | |
1642 | auditfunc = mbuf_slab_audit; | |
6d2010ae | 1643 | logfunc = mleak_logger; |
2d21ac55 A |
1644 | } |
1645 | ||
1646 | /* | |
1647 | * Disable per-CPU caches for jumbo classes if there | |
1648 | * is no jumbo cluster pool available in the system. | |
1649 | * The cache itself is still created (but will never | |
1650 | * be populated) since it simplifies the code. | |
1651 | */ | |
1652 | if ((m_class(m) == MC_MBUF_16KCL || m_class(m) == MC_16KCL) && | |
1653 | njcl == 0) | |
1654 | flags |= MCF_NOCPUCACHE; | |
1655 | ||
6d2010ae A |
1656 | if (!mclfindleak) |
1657 | flags |= MCF_NOLEAKLOG; | |
1658 | ||
2d21ac55 | 1659 | m_cache(m) = mcache_create_ext(m_cname(m), m_maxsize(m), |
6d2010ae | 1660 | allocfunc, freefunc, auditfunc, logfunc, mbuf_slab_notify, |
b0d623f7 | 1661 | (void *)(uintptr_t)m, flags, MCR_SLEEP); |
2d21ac55 A |
1662 | } |
1663 | ||
1664 | /* | |
1665 | * Allocate structure for per-CPU statistics that's aligned | |
1666 | * on the CPU cache boundary; this code assumes that we never | |
1667 | * uninitialize this framework, since the original address | |
1668 | * before alignment is not saved. | |
1669 | */ | |
1670 | ncpu = ml_get_max_cpus(); | |
39236c6e | 1671 | MALLOC(buf, void *, MBUF_MTYPES_SIZE(ncpu) + CPU_CACHE_LINE_SIZE, |
2d21ac55 A |
1672 | M_TEMP, M_WAITOK); |
1673 | VERIFY(buf != NULL); | |
1674 | ||
39236c6e A |
1675 | mbuf_mtypes = (mbuf_mtypes_t *)P2ROUNDUP((intptr_t)buf, |
1676 | CPU_CACHE_LINE_SIZE); | |
2d21ac55 A |
1677 | bzero(mbuf_mtypes, MBUF_MTYPES_SIZE(ncpu)); |
1678 | ||
6d2010ae A |
1679 | /* |
1680 | * Set the max limit on sb_max to be 1/16th of the size of | |
b0d623f7 A |
1681 | * memory allocated for mbuf clusters. |
1682 | */ | |
6d2010ae | 1683 | high_sb_max = (nmbclusters << (MCLSHIFT - 4)); |
b0d623f7 A |
1684 | if (high_sb_max < sb_max) { |
1685 | /* sb_max is too large for this configuration, scale it down */ | |
6d2010ae | 1686 | if (high_sb_max > (1 << MBSHIFT)) { |
b0d623f7 A |
1687 | /* We have at least 16 MB of mbuf pool */ | |
1688 | sb_max = high_sb_max; | |
1689 | } else if ((nmbclusters << MCLSHIFT) > (1 << MBSHIFT)) { | |
6d2010ae A |
1690 | /* |
1691 | * If we have more than 1MB of mbuf pool, cap the size of | |
b0d623f7 | 1692 | * the max sock buf at 1MB |
6d2010ae | 1693 | */ |
b0d623f7 A |
1694 | sb_max = high_sb_max = (1 << MBSHIFT); |
1695 | } else { | |
1696 | sb_max = high_sb_max; | |
1697 | } | |
1698 | } | |
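/*
 * Illustrative scaling (a sketch, assuming the default sb_max of
 * 8MB): a 64MB pool (nmbclusters = 32768) gives high_sb_max =
 * 64MB / 16 = 4MB.  Since 4MB < 8MB the default is too large for
 * this configuration, and since 4MB > 1MB (i.e. at least 16MB of
 * pool), sb_max is scaled down to 4MB.
 */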
1699 | ||
316670eb A |
1700 | /* allocate space for mbuf_dump_buf */ |
1701 | MALLOC(mbuf_dump_buf, char *, MBUF_DUMP_BUF_SIZE, M_TEMP, M_WAITOK); | |
1702 | VERIFY(mbuf_dump_buf != NULL); | |
1703 | ||
39236c6e A |
1704 | if (mbuf_debug & MCF_DEBUG) { |
1705 | printf("%s: MLEN %d, MHLEN %d\n", __func__, | |
1706 | (int)_MLEN, (int)_MHLEN); | |
1707 | } | |
1708 | ||
1709 | printf("%s: done [%d MB total pool size, (%d/%d) split]\n", __func__, | |
6d2010ae A |
1710 | (nmbclusters << MCLSHIFT) >> MBSHIFT, |
1711 | (nclusters << MCLSHIFT) >> MBSHIFT, | |
1712 | (njcl << MCLSHIFT) >> MBSHIFT); | |
39037602 A |
1713 | |
1714 | /* initialize lock for the tx completion callback table */ | |
1715 | mbuf_tx_compl_tbl_lck_grp_attr = lck_grp_attr_alloc_init(); | |
1716 | if (mbuf_tx_compl_tbl_lck_grp_attr == NULL) { | |
1717 | panic("%s: lck_grp_attr_alloc_init failed", __func__); | |
1718 | /* NOTREACHED */ | |
1719 | } | |
1720 | mbuf_tx_compl_tbl_lck_grp = lck_grp_alloc_init("mbuf_tx_compl_tbl", | |
1721 | mbuf_tx_compl_tbl_lck_grp_attr); | |
1722 | if (mbuf_tx_compl_tbl_lck_grp == NULL) { | |
1723 | panic("%s: lck_grp_alloc_init failed", __func__); | |
1724 | /* NOTREACHED */ | |
1725 | } | |
1726 | mbuf_tx_compl_tbl_lck_attr = lck_attr_alloc_init(); | |
1727 | if (mbuf_tx_compl_tbl_lck_attr == NULL) { | |
1728 | panic("%s: lck_attr_alloc_init failed", __func__); | |
1729 | /* NOTREACHED */ | |
1730 | } | |
1731 | lck_rw_init(mbuf_tx_compl_tbl_lock, mbuf_tx_compl_tbl_lck_grp, | |
1732 | mbuf_tx_compl_tbl_lck_attr); | |
1733 | ||
2d21ac55 A |
1734 | } |
1735 | ||
1736 | /* | |
1737 | * Obtain a slab of object(s) from the class's freelist. | |
1738 | */ | |
1739 | static mcache_obj_t * | |
1740 | slab_alloc(mbuf_class_t class, int wait) | |
1741 | { | |
1742 | mcl_slab_t *sp; | |
1743 | mcache_obj_t *buf; | |
1744 | ||
1745 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
1746 | ||
2d21ac55 A |
1747 | /* This should always be NULL for us */ |
1748 | VERIFY(m_cobjlist(class) == NULL); | |
1749 | ||
1750 | /* | |
1751 | * Treat composite objects as having a longer lifespan by using | |
1752 | * a slab from the reverse direction, in the hope that this | |
1753 | * reduces the probability of fragmentation for slabs that hold | |
1754 | * more than one buffer chunk (e.g. mbuf slabs). For other | |
1755 | * slabs, this probably doesn't make much of a difference. | |
1756 | */ | |
3e170ce0 A |
1757 | if ((class == MC_MBUF || class == MC_CL || class == MC_BIGCL) |
1758 | && (wait & MCR_COMP)) | |
2d21ac55 A |
1759 | sp = (mcl_slab_t *)TAILQ_LAST(&m_slablist(class), mcl_slhead); |
1760 | else | |
1761 | sp = (mcl_slab_t *)TAILQ_FIRST(&m_slablist(class)); | |
1762 | ||
1763 | if (sp == NULL) { | |
1764 | VERIFY(m_infree(class) == 0 && m_slab_cnt(class) == 0); | |
1765 | /* The slab list for this class is empty */ | |
1766 | return (NULL); | |
1767 | } | |
1768 | ||
1769 | VERIFY(m_infree(class) > 0); | |
1770 | VERIFY(!slab_is_detached(sp)); | |
1771 | VERIFY(sp->sl_class == class && | |
1772 | (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED); | |
1773 | buf = sp->sl_head; | |
1774 | VERIFY(slab_inrange(sp, buf) && sp == slab_get(buf)); | |
3e170ce0 A |
1775 | sp->sl_head = buf->obj_next; |
1776 | /* Increment slab reference */ | |
1777 | sp->sl_refcnt++; | |
1778 | ||
1779 | VERIFY(sp->sl_head != NULL || sp->sl_refcnt == sp->sl_chunks); | |
2d21ac55 | 1780 | |
2d21ac55 A |
1781 | if (sp->sl_head != NULL && !slab_inrange(sp, sp->sl_head)) { |
1782 | slab_nextptr_panic(sp, sp->sl_head); | |
1783 | /* In case sl_head is in the map but not in the slab */ | |
1784 | VERIFY(slab_inrange(sp, sp->sl_head)); | |
1785 | /* NOTREACHED */ | |
1786 | } | |
1787 | ||
2d21ac55 A |
1788 | if (mclaudit != NULL) { |
1789 | mcache_audit_t *mca = mcl_audit_buf2mca(class, buf); | |
1790 | mca->mca_uflags = 0; | |
1791 | /* Save contents on mbuf objects only */ | |
1792 | if (class == MC_MBUF) | |
1793 | mca->mca_uflags |= MB_SCVALID; | |
1794 | } | |
1795 | ||
1796 | if (class == MC_CL) { | |
1797 | mbstat.m_clfree = (--m_infree(MC_CL)) + m_infree(MC_MBUF_CL); | |
1798 | /* | |
3e170ce0 | 1799 | * A 2K cluster slab can have at most NCLPG references. |
2d21ac55 | 1800 | */ |
3e170ce0 A |
1801 | VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NCLPG && |
1802 | sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE); | |
1803 | VERIFY(sp->sl_refcnt < NCLPG || sp->sl_head == NULL); | |
2d21ac55 | 1804 | } else if (class == MC_BIGCL) { |
2d21ac55 A |
1805 | mbstat.m_bigclfree = (--m_infree(MC_BIGCL)) + |
1806 | m_infree(MC_MBUF_BIGCL); | |
1807 | /* | |
3e170ce0 | 1808 | * A 4K cluster slab can have at most NBCLPG references. |
2d21ac55 | 1809 | */ |
3e170ce0 | 1810 | VERIFY(sp->sl_refcnt >= 1 && sp->sl_chunks == NBCLPG && |
39037602 | 1811 | sp->sl_len == PAGE_SIZE && |
3e170ce0 | 1812 | (sp->sl_refcnt < NBCLPG || sp->sl_head == NULL)); |
2d21ac55 A |
1813 | } else if (class == MC_16KCL) { |
1814 | mcl_slab_t *nsp; | |
1815 | int k; | |
1816 | ||
1817 | --m_infree(MC_16KCL); | |
1818 | VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 && | |
6d2010ae | 1819 | sp->sl_len == m_maxsize(class) && sp->sl_head == NULL); |
2d21ac55 | 1820 | /* |
6d2010ae A |
1821 | * Increment 2nd-Nth slab reference, where N is NSLABSP16KB. |
1822 | * A 16KB big cluster takes NSLABSP16KB slabs, each having at | |
1823 | * most 1 reference. | |
2d21ac55 | 1824 | */ |
6d2010ae | 1825 | for (nsp = sp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
1826 | nsp = nsp->sl_next; |
1827 | /* Next slab must already be present */ | |
1828 | VERIFY(nsp != NULL); | |
1829 | nsp->sl_refcnt++; | |
1830 | VERIFY(!slab_is_detached(nsp)); | |
1831 | VERIFY(nsp->sl_class == MC_16KCL && | |
1832 | nsp->sl_flags == (SLF_MAPPED | SLF_PARTIAL) && | |
1833 | nsp->sl_refcnt == 1 && nsp->sl_chunks == 0 && | |
1834 | nsp->sl_len == 0 && nsp->sl_base == sp->sl_base && | |
1835 | nsp->sl_head == NULL); | |
1836 | } | |
1837 | } else { | |
6d2010ae | 1838 | VERIFY(class == MC_MBUF); |
2d21ac55 A |
1839 | --m_infree(MC_MBUF); |
1840 | /* | |
1841 | * If auditing is turned on, this check is | |
1842 | * deferred until later in mbuf_slab_audit(). | |
1843 | */ | |
1844 | if (mclaudit == NULL) | |
1845 | _MCHECK((struct mbuf *)buf); | |
1846 | /* | |
1847 | * Since we have incremented the reference count above, | |
6d2010ae | 1848 | * an mbuf slab (formerly a 4KB cluster slab that was cut |
2d21ac55 | 1849 | * up into mbufs) must have a reference count between 1 |
3e170ce0 | 1850 | * and NMBPG at this point. |
2d21ac55 | 1851 | */ |
3e170ce0 A |
1852 | VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NMBPG && |
1853 | sp->sl_chunks == NMBPG && | |
1854 | sp->sl_len == PAGE_SIZE); | |
1855 | VERIFY(sp->sl_refcnt < NMBPG || sp->sl_head == NULL); | |
2d21ac55 A |
1856 | } |
1857 | ||
1858 | /* If empty, remove this slab from the class's freelist */ | |
1859 | if (sp->sl_head == NULL) { | |
3e170ce0 A |
1860 | VERIFY(class != MC_MBUF || sp->sl_refcnt == NMBPG); |
1861 | VERIFY(class != MC_CL || sp->sl_refcnt == NCLPG); | |
1862 | VERIFY(class != MC_BIGCL || sp->sl_refcnt == NBCLPG); | |
2d21ac55 A |
1863 | slab_remove(sp, class); |
1864 | } | |
1865 | ||
1866 | return (buf); | |
1867 | } | |
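/*
 * Bookkeeping sketch (assuming 4KB pages and MSIZE == 256, so
 * NMBPG == 16): an mbuf slab covers one page with sl_chunks == 16;
 * each slab_alloc() above advances sl_head and bumps sl_refcnt, and
 * once the 16th mbuf is handed out sl_head becomes NULL and the
 * slab is unlinked from the class freelist until an mbuf returns.
 */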
1868 | ||
1869 | /* | |
1870 | * Place a slab of object(s) back into a class's slab list. | |
1871 | */ | |
1872 | static void | |
1873 | slab_free(mbuf_class_t class, mcache_obj_t *buf) | |
1874 | { | |
1875 | mcl_slab_t *sp; | |
3e170ce0 A |
1876 | boolean_t reinit_supercl = false; |
1877 | mbuf_class_t super_class; | |
2d21ac55 A |
1878 | |
1879 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
1880 | ||
1881 | VERIFY(class != MC_16KCL || njcl > 0); | |
1882 | VERIFY(buf->obj_next == NULL); | |
3e170ce0 | 1883 | |
2d21ac55 A |
1884 | sp = slab_get(buf); |
1885 | VERIFY(sp->sl_class == class && slab_inrange(sp, buf) && | |
1886 | (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED); | |
1887 | ||
1888 | /* Decrement slab reference */ | |
1889 | sp->sl_refcnt--; | |
1890 | ||
6d2010ae | 1891 | if (class == MC_CL) { |
2d21ac55 A |
1892 | VERIFY(IS_P2ALIGNED(buf, MCLBYTES)); |
1893 | /* | |
6d2010ae A |
1894 | * A slab that has been split into 2KB clusters can have | |
1895 | * at most NCLPG - 1 outstanding references at this point. | |
1896 | */ | |
3e170ce0 A |
1897 | VERIFY(sp->sl_refcnt >= 0 && sp->sl_refcnt <= (NCLPG - 1) && |
1898 | sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE); | |
1899 | VERIFY(sp->sl_refcnt < (NCLPG - 1) || | |
6d2010ae A |
1900 | (slab_is_detached(sp) && sp->sl_head == NULL)); |
1901 | } else if (class == MC_BIGCL) { | |
3e170ce0 A |
1902 | VERIFY(IS_P2ALIGNED(buf, MBIGCLBYTES)); |
1903 | ||
1904 | /* A 4KB cluster slab can have NBCLPG references at most */ | |
1905 | VERIFY(sp->sl_refcnt >= 0 && sp->sl_chunks == NBCLPG); | |
1906 | VERIFY(sp->sl_refcnt < (NBCLPG - 1) || | |
1907 | (slab_is_detached(sp) && sp->sl_head == NULL)); | |
2d21ac55 A |
1908 | } else if (class == MC_16KCL) { |
1909 | mcl_slab_t *nsp; | |
1910 | int k; | |
1911 | /* | |
6d2010ae | 1912 | * A 16KB cluster takes NSLABSP16KB slabs, all of which |
2d21ac55 A |
1913 | * must now have a reference count of 0. | |
1914 | */ | |
3e170ce0 | 1915 | VERIFY(IS_P2ALIGNED(buf, PAGE_SIZE)); |
2d21ac55 | 1916 | VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 && |
6d2010ae | 1917 | sp->sl_len == m_maxsize(class) && sp->sl_head == NULL); |
2d21ac55 | 1918 | VERIFY(slab_is_detached(sp)); |
6d2010ae | 1919 | for (nsp = sp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
1920 | nsp = nsp->sl_next; |
1921 | /* Next slab must already be present */ | |
1922 | VERIFY(nsp != NULL); | |
1923 | nsp->sl_refcnt--; | |
1924 | VERIFY(slab_is_detached(nsp)); | |
1925 | VERIFY(nsp->sl_class == MC_16KCL && | |
1926 | (nsp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) && | |
1927 | nsp->sl_refcnt == 0 && nsp->sl_chunks == 0 && | |
1928 | nsp->sl_len == 0 && nsp->sl_base == sp->sl_base && | |
1929 | nsp->sl_head == NULL); | |
1930 | } | |
1931 | } else { | |
1932 | /* | |
3e170ce0 A |
1933 | * A slab that has been split into mbufs has at most | |
1934 | * NMBPG references. Since we have decremented | |
1935 | * one reference above, it must now be between 0 and | |
1936 | * NMBPG-1. | |
2d21ac55 | 1937 | */ |
6d2010ae | 1938 | VERIFY(class == MC_MBUF); |
3e170ce0 A |
1939 | VERIFY(sp->sl_refcnt >= 0 && |
1940 | sp->sl_refcnt <= (NMBPG - 1) && | |
1941 | sp->sl_chunks == NMBPG && | |
1942 | sp->sl_len == PAGE_SIZE); | |
1943 | VERIFY(sp->sl_refcnt < (NMBPG - 1) || | |
2d21ac55 A |
1944 | (slab_is_detached(sp) && sp->sl_head == NULL)); |
1945 | } | |
1946 | ||
1947 | /* | |
1948 | * When auditing is enabled, ensure that the buffer still | |
1949 | * contains the free pattern. Otherwise it got corrupted | |
1950 | * while at the CPU cache layer. | |
1951 | */ | |
1952 | if (mclaudit != NULL) { | |
1953 | mcache_audit_t *mca = mcl_audit_buf2mca(class, buf); | |
6d2010ae | 1954 | if (mclverify) { |
3e170ce0 A |
1955 | mcache_audit_free_verify(mca, buf, 0, |
1956 | m_maxsize(class)); | |
6d2010ae | 1957 | } |
2d21ac55 A |
1958 | mca->mca_uflags &= ~MB_SCVALID; |
1959 | } | |
1960 | ||
1961 | if (class == MC_CL) { | |
1962 | mbstat.m_clfree = (++m_infree(MC_CL)) + m_infree(MC_MBUF_CL); | |
6d2010ae | 1963 | buf->obj_next = sp->sl_head; |
2d21ac55 A |
1964 | } else if (class == MC_BIGCL) { |
1965 | mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) + | |
1966 | m_infree(MC_MBUF_BIGCL); | |
3e170ce0 | 1967 | buf->obj_next = sp->sl_head; |
2d21ac55 A |
1968 | } else if (class == MC_16KCL) { |
1969 | ++m_infree(MC_16KCL); | |
1970 | } else { | |
1971 | ++m_infree(MC_MBUF); | |
1972 | buf->obj_next = sp->sl_head; | |
1973 | } | |
1974 | sp->sl_head = buf; | |
1975 | ||
6d2010ae | 1976 | /* |
3e170ce0 A |
1977 | * If a slab has been split to either one which holds 2KB clusters, |
1978 | * or one which holds mbufs, turn it back to one which holds a | |
1979 | * 4 or 16 KB cluster depending on the page size. | |
6d2010ae | 1980 | */ |
3e170ce0 A |
1981 | if (m_maxsize(MC_BIGCL) == PAGE_SIZE) { |
1982 | super_class = MC_BIGCL; | |
1983 | } else { | |
1984 | VERIFY(PAGE_SIZE == m_maxsize(MC_16KCL)); | |
1985 | super_class = MC_16KCL; | |
1986 | } | |
6d2010ae | 1987 | if (class == MC_MBUF && sp->sl_refcnt == 0 && |
3e170ce0 A |
1988 | m_total(class) >= (m_minlimit(class) + NMBPG) && |
1989 | m_total(super_class) < m_maxlimit(super_class)) { | |
1990 | int i = NMBPG; | |
6d2010ae | 1991 | |
3e170ce0 | 1992 | m_total(MC_MBUF) -= NMBPG; |
2d21ac55 | 1993 | mbstat.m_mbufs = m_total(MC_MBUF); |
3e170ce0 A |
1994 | m_infree(MC_MBUF) -= NMBPG; |
1995 | mtype_stat_add(MT_FREE, -((unsigned)NMBPG)); | |
2d21ac55 A |
1996 | |
1997 | while (i--) { | |
1998 | struct mbuf *m = sp->sl_head; | |
1999 | VERIFY(m != NULL); | |
2000 | sp->sl_head = m->m_next; | |
2001 | m->m_next = NULL; | |
2002 | } | |
3e170ce0 | 2003 | reinit_supercl = true; |
6d2010ae | 2004 | } else if (class == MC_CL && sp->sl_refcnt == 0 && |
3e170ce0 A |
2005 | m_total(class) >= (m_minlimit(class) + NCLPG) && |
2006 | m_total(super_class) < m_maxlimit(super_class)) { | |
2007 | int i = NCLPG; | |
6d2010ae | 2008 | |
3e170ce0 | 2009 | m_total(MC_CL) -= NCLPG; |
6d2010ae | 2010 | mbstat.m_clusters = m_total(MC_CL); |
3e170ce0 | 2011 | m_infree(MC_CL) -= NCLPG; |
6d2010ae A |
2012 | |
2013 | while (i--) { | |
2014 | union mcluster *c = sp->sl_head; | |
2015 | VERIFY(c != NULL); | |
2016 | sp->sl_head = c->mcl_next; | |
2017 | c->mcl_next = NULL; | |
2018 | } | |
3e170ce0 A |
2019 | reinit_supercl = true; |
2020 | } else if (class == MC_BIGCL && super_class != MC_BIGCL && | |
2021 | sp->sl_refcnt == 0 && | |
2022 | m_total(class) >= (m_minlimit(class) + NBCLPG) && | |
2023 | m_total(super_class) < m_maxlimit(super_class)) { | |
2024 | int i = NBCLPG; | |
2025 | ||
2026 | VERIFY(super_class == MC_16KCL); | |
2027 | m_total(MC_BIGCL) -= NBCLPG; | |
2028 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
2029 | m_infree(MC_BIGCL) -= NBCLPG; | |
6d2010ae | 2030 | |
3e170ce0 A |
2031 | while (i--) { |
2032 | union mbigcluster *bc = sp->sl_head; | |
2033 | VERIFY(bc != NULL); | |
2034 | sp->sl_head = bc->mbc_next; | |
2035 | bc->mbc_next = NULL; | |
2036 | } | |
2037 | reinit_supercl = true; | |
2038 | } | |
2039 | ||
2040 | if (reinit_supercl) { | |
2041 | VERIFY(sp->sl_head == NULL); | |
2042 | VERIFY(m_total(class) >= m_minlimit(class)); | |
6d2010ae A |
2043 | slab_remove(sp, class); |
2044 | ||
3e170ce0 A |
2045 | /* Reinitialize it as a cluster for the super class */ |
2046 | m_total(super_class)++; | |
2047 | m_infree(super_class)++; | |
2048 | VERIFY(sp->sl_flags == (SLF_MAPPED | SLF_DETACHED) && | |
2049 | sp->sl_len == PAGE_SIZE && sp->sl_refcnt == 0); | |
6d2010ae | 2050 | |
3e170ce0 A |
2051 | slab_init(sp, super_class, SLF_MAPPED, sp->sl_base, |
2052 | sp->sl_base, PAGE_SIZE, 0, 1); | |
2053 | if (mclverify) | |
6d2010ae | 2054 | mcache_set_pattern(MCACHE_FREE_PATTERN, |
3e170ce0 A |
2055 | (caddr_t)sp->sl_base, sp->sl_len); |
2056 | ((mcache_obj_t *)(sp->sl_base))->obj_next = NULL; | |
2057 | ||
2058 | if (super_class == MC_BIGCL) { | |
2059 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
2060 | mbstat.m_bigclfree = m_infree(MC_BIGCL) + | |
2061 | m_infree(MC_MBUF_BIGCL); | |
6d2010ae | 2062 | } |
2d21ac55 A |
2063 | |
2064 | VERIFY(slab_is_detached(sp)); | |
3e170ce0 A |
2065 | VERIFY(m_total(super_class) <= m_maxlimit(super_class)); |
2066 | ||
2d21ac55 | 2067 | /* And finally switch class */ |
3e170ce0 | 2068 | class = super_class; |
2d21ac55 A |
2069 | } |
2070 | ||
2071 | /* Reinsert the slab to the class's slab list */ | |
2072 | if (slab_is_detached(sp)) | |
2073 | slab_insert(sp, class); | |
2074 | } | |
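/*
 * Coalescing sketch (assuming 4KB pages, so the super class is
 * MC_BIGCL and NMBPG == 16): when the free above drops an mbuf
 * slab's sl_refcnt to 0 and MC_MBUF still holds NMBPG mbufs above
 * its minimum, the 16 constituent mbufs are pulled off the slab,
 * the counters move from MC_MBUF to MC_BIGCL, and the page is
 * reinitialized as a single 4KB cluster available to any class.
 */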
2075 | ||
2076 | /* | |
2077 | * Common allocator for rudimentary objects called by the CPU cache layer | |
2078 | * during an allocation request whenever there is no available element in the | |
2079 | * bucket layer. It returns one or more elements from the appropriate global | |
2080 | * freelist. If the freelist is empty, it will attempt to populate it and | |
2081 | * retry the allocation. | |
2082 | */ | |
2083 | static unsigned int | |
2084 | mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait) | |
2085 | { | |
2086 | mbuf_class_t class = (mbuf_class_t)arg; | |
2087 | unsigned int need = num; | |
2088 | mcache_obj_t **list = *plist; | |
2089 | ||
2090 | ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class)); | |
2091 | ASSERT(need > 0); | |
2092 | ||
2093 | lck_mtx_lock(mbuf_mlock); | |
2094 | ||
2095 | for (;;) { | |
2096 | if ((*list = slab_alloc(class, wait)) != NULL) { | |
2097 | (*list)->obj_next = NULL; | |
2098 | list = *plist = &(*list)->obj_next; | |
2099 | ||
2100 | if (--need == 0) { | |
2101 | /* | |
2102 | * If the number of elements in the freelist has | |
2103 | * dropped below the low watermark (1/32 of the | |
2104 | * total), asynchronously populate the freelist | |
2105 | * now rather than doing it later when we run out. | |
2106 | */ | |
2107 | if (!mbuf_cached_above(class, wait) && | |
3e170ce0 | 2108 | m_infree(class) < (m_total(class) >> 5)) { |
2d21ac55 A |
2109 | (void) freelist_populate(class, 1, |
2110 | M_DONTWAIT); | |
2111 | } | |
2112 | break; | |
2113 | } | |
2114 | } else { | |
2115 | VERIFY(m_infree(class) == 0 || class == MC_CL); | |
2116 | ||
2117 | (void) freelist_populate(class, 1, | |
2118 | (wait & MCR_NOSLEEP) ? M_DONTWAIT : M_WAIT); | |
2119 | ||
2120 | if (m_infree(class) > 0) | |
2121 | continue; | |
2122 | ||
2123 | /* Check if there's anything at the cache layer */ | |
2124 | if (mbuf_cached_above(class, wait)) | |
2125 | break; | |
2126 | ||
6d2010ae A |
2127 | /* watchdog checkpoint */ |
2128 | mbuf_watchdog(); | |
2129 | ||
2d21ac55 A |
2130 | /* We have nothing and cannot block; give up */ |
2131 | if (wait & MCR_NOSLEEP) { | |
2132 | if (!(wait & MCR_TRYHARD)) { | |
2133 | m_fail_cnt(class)++; | |
2134 | mbstat.m_drops++; | |
2135 | break; | |
2136 | } | |
2137 | } | |
2138 | ||
2139 | /* | |
2140 | * If the freelist is still empty and the caller is | |
2141 | * willing to be blocked, sleep on the wait channel | |
2142 | * until an element is available. Otherwise, if | |
2143 | * MCR_TRYHARD is set, do our best to satisfy the | |
2144 | * request without having to go to sleep. | |
2145 | */ | |
2146 | if (mbuf_worker_ready && | |
2147 | mbuf_sleep(class, need, wait)) | |
2148 | break; | |
2149 | ||
2150 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2151 | } | |
2152 | } | |
2153 | ||
2154 | m_alloc_cnt(class) += num - need; | |
2155 | lck_mtx_unlock(mbuf_mlock); | |
2156 | ||
2157 | return (num - need); | |
2158 | } | |
2159 | ||
2160 | /* | |
2161 | * Common de-allocator for rudimentary objects called by the CPU cache | |
2162 | * layer when one or more elements need to be returned to the appropriate | |
2163 | * global freelist. | |
2164 | */ | |
2165 | static void | |
2166 | mbuf_slab_free(void *arg, mcache_obj_t *list, __unused int purged) | |
2167 | { | |
2168 | mbuf_class_t class = (mbuf_class_t)arg; | |
2169 | mcache_obj_t *nlist; | |
2170 | unsigned int num = 0; | |
2171 | int w; | |
2172 | ||
2173 | ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class)); | |
2174 | ||
2175 | lck_mtx_lock(mbuf_mlock); | |
2176 | ||
2177 | for (;;) { | |
2178 | nlist = list->obj_next; | |
2179 | list->obj_next = NULL; | |
2180 | slab_free(class, list); | |
2181 | ++num; | |
2182 | if ((list = nlist) == NULL) | |
2183 | break; | |
2184 | } | |
2185 | m_free_cnt(class) += num; | |
2186 | ||
2187 | if ((w = mb_waiters) > 0) | |
2188 | mb_waiters = 0; | |
2189 | ||
2190 | lck_mtx_unlock(mbuf_mlock); | |
2191 | ||
2192 | if (w != 0) | |
2193 | wakeup(mb_waitchan); | |
2194 | } | |
2195 | ||
2196 | /* | |
2197 | * Common auditor for rudimentary objects called by the CPU cache layer | |
2198 | * during an allocation or free request. For the former, this is called | |
2199 | * after the objects are obtained from either the bucket or slab layer | |
2200 | * and before they are returned to the caller. For the latter, this is | |
2201 | * called immediately during free and before placing the objects into | |
2202 | * the bucket or slab layer. | |
2203 | */ | |
2204 | static void | |
2205 | mbuf_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) | |
2206 | { | |
2207 | mbuf_class_t class = (mbuf_class_t)arg; | |
2208 | mcache_audit_t *mca; | |
2209 | ||
2210 | ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class)); | |
2211 | ||
2212 | while (list != NULL) { | |
2213 | lck_mtx_lock(mbuf_mlock); | |
2214 | mca = mcl_audit_buf2mca(class, list); | |
2215 | ||
2216 | /* Do the sanity checks */ | |
2217 | if (class == MC_MBUF) { | |
2218 | mcl_audit_mbuf(mca, list, FALSE, alloc); | |
2219 | ASSERT(mca->mca_uflags & MB_SCVALID); | |
2220 | } else { | |
2221 | mcl_audit_cluster(mca, list, m_maxsize(class), | |
2222 | alloc, TRUE); | |
2223 | ASSERT(!(mca->mca_uflags & MB_SCVALID)); | |
2224 | } | |
2225 | /* Record this transaction */ | |
6d2010ae | 2226 | if (mcltrace) |
39236c6e | 2227 | mcache_buffer_log(mca, list, m_cache(class), &mb_start); |
6d2010ae | 2228 | |
2d21ac55 A |
2229 | if (alloc) |
2230 | mca->mca_uflags |= MB_INUSE; | |
2231 | else | |
2232 | mca->mca_uflags &= ~MB_INUSE; | |
2233 | /* Unpair the object (unconditionally) */ | |
2234 | mca->mca_uptr = NULL; | |
2235 | lck_mtx_unlock(mbuf_mlock); | |
2236 | ||
2237 | list = list->obj_next; | |
2238 | } | |
2239 | } | |
2240 | ||
2241 | /* | |
2242 | * Common notify routine for all caches. It is called by mcache when | |
2243 | * one or more objects get freed. We use this indication to trigger | |
2244 | * the wakeup of any sleeping threads so that they can retry their | |
2245 | * allocation requests. | |
2246 | */ | |
2247 | static void | |
2248 | mbuf_slab_notify(void *arg, u_int32_t reason) | |
2249 | { | |
2250 | mbuf_class_t class = (mbuf_class_t)arg; | |
2251 | int w; | |
2252 | ||
2253 | ASSERT(MBUF_CLASS_VALID(class)); | |
2254 | ||
2255 | if (reason != MCN_RETRYALLOC) | |
2256 | return; | |
2257 | ||
2258 | lck_mtx_lock(mbuf_mlock); | |
2259 | if ((w = mb_waiters) > 0) { | |
2260 | m_notified(class)++; | |
2261 | mb_waiters = 0; | |
2262 | } | |
2263 | lck_mtx_unlock(mbuf_mlock); | |
2264 | ||
2265 | if (w != 0) | |
2266 | wakeup(mb_waitchan); | |
2267 | } | |
2268 | ||
2269 | /* | |
2270 | * Obtain object(s) from the composite class's freelist. | |
2271 | */ | |
2272 | static unsigned int | |
2273 | cslab_alloc(mbuf_class_t class, mcache_obj_t ***plist, unsigned int num) | |
2274 | { | |
2275 | unsigned int need = num; | |
2276 | mcl_slab_t *sp, *clsp, *nsp; | |
2277 | struct mbuf *m; | |
2278 | mcache_obj_t **list = *plist; | |
2279 | void *cl; | |
2280 | ||
2281 | VERIFY(need > 0); | |
2282 | VERIFY(class != MC_MBUF_16KCL || njcl > 0); | |
2283 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2284 | ||
2285 | /* Get what we can from the freelist */ | |
2286 | while ((*list = m_cobjlist(class)) != NULL) { | |
2287 | MRANGE(*list); | |
2288 | ||
2289 | m = (struct mbuf *)*list; | |
2290 | sp = slab_get(m); | |
2291 | cl = m->m_ext.ext_buf; | |
2292 | clsp = slab_get(cl); | |
2293 | VERIFY(m->m_flags == M_EXT && cl != NULL); | |
813fb2f6 | 2294 | VERIFY(m_get_rfa(m) != NULL && MBUF_IS_COMPOSITE(m)); |
6d2010ae A |
2295 | |
2296 | if (class == MC_MBUF_CL) { | |
2297 | VERIFY(clsp->sl_refcnt >= 1 && | |
3e170ce0 | 2298 | clsp->sl_refcnt <= NCLPG); |
6d2010ae | 2299 | } else { |
3e170ce0 A |
2300 | VERIFY(clsp->sl_refcnt >= 1 && |
2301 | clsp->sl_refcnt <= NBCLPG); | |
6d2010ae A |
2302 | } |
2303 | ||
2304 | if (class == MC_MBUF_16KCL) { | |
2d21ac55 | 2305 | int k; |
6d2010ae | 2306 | for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
2307 | nsp = nsp->sl_next; |
2308 | /* Next slab must already be present */ | |
2309 | VERIFY(nsp != NULL); | |
2310 | VERIFY(nsp->sl_refcnt == 1); | |
2311 | } | |
2312 | } | |
2313 | ||
2314 | if ((m_cobjlist(class) = (*list)->obj_next) != NULL && | |
2315 | !MBUF_IN_MAP(m_cobjlist(class))) { | |
2316 | slab_nextptr_panic(sp, m_cobjlist(class)); | |
2317 | /* NOTREACHED */ | |
2318 | } | |
2319 | (*list)->obj_next = NULL; | |
2320 | list = *plist = &(*list)->obj_next; | |
2321 | ||
2322 | if (--need == 0) | |
2323 | break; | |
2324 | } | |
2325 | m_infree(class) -= (num - need); | |
2326 | ||
2327 | return (num - need); | |
2328 | } | |
2329 | ||
2330 | /* | |
2331 | * Place object(s) back into a composite class's freelist. | |
2332 | */ | |
2333 | static unsigned int | |
2334 | cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged) | |
2335 | { | |
2336 | mcache_obj_t *o, *tail; | |
2337 | unsigned int num = 0; | |
2338 | struct mbuf *m, *ms; | |
2339 | mcache_audit_t *mca = NULL; | |
2340 | mcache_obj_t *ref_list = NULL; | |
2341 | mcl_slab_t *clsp, *nsp; | |
2342 | void *cl; | |
6d2010ae | 2343 | mbuf_class_t cl_class; |
2d21ac55 A |
2344 | |
2345 | ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class)); | |
2346 | VERIFY(class != MC_MBUF_16KCL || njcl > 0); | |
2347 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2348 | ||
6d2010ae A |
2349 | if (class == MC_MBUF_CL) { |
2350 | cl_class = MC_CL; | |
2351 | } else if (class == MC_MBUF_BIGCL) { | |
2352 | cl_class = MC_BIGCL; | |
2353 | } else { | |
2354 | VERIFY(class == MC_MBUF_16KCL); | |
2355 | cl_class = MC_16KCL; | |
2356 | } | |
2357 | ||
2d21ac55 A |
2358 | o = tail = list; |
2359 | ||
2360 | while ((m = ms = (struct mbuf *)o) != NULL) { | |
2361 | mcache_obj_t *rfa, *nexto = o->obj_next; | |
2362 | ||
2363 | /* Do the mbuf sanity checks */ | |
2364 | if (mclaudit != NULL) { | |
2365 | mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); | |
6d2010ae A |
2366 | if (mclverify) { |
2367 | mcache_audit_free_verify(mca, m, 0, | |
2368 | m_maxsize(MC_MBUF)); | |
2369 | } | |
39236c6e | 2370 | ms = MCA_SAVED_MBUF_PTR(mca); |
2d21ac55 A |
2371 | } |
2372 | ||
2373 | /* Do the cluster sanity checks */ | |
2374 | cl = ms->m_ext.ext_buf; | |
2375 | clsp = slab_get(cl); | |
6d2010ae A |
2376 | if (mclverify) { |
2377 | size_t size = m_maxsize(cl_class); | |
2378 | mcache_audit_free_verify(mcl_audit_buf2mca(cl_class, | |
2d21ac55 A |
2379 | (mcache_obj_t *)cl), cl, 0, size); |
2380 | } | |
2381 | VERIFY(ms->m_type == MT_FREE); | |
2382 | VERIFY(ms->m_flags == M_EXT); | |
813fb2f6 | 2383 | VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms)); |
6d2010ae A |
2384 | if (cl_class == MC_CL) { |
2385 | VERIFY(clsp->sl_refcnt >= 1 && | |
3e170ce0 | 2386 | clsp->sl_refcnt <= NCLPG); |
6d2010ae | 2387 | } else { |
39037602 | 2388 | VERIFY(clsp->sl_refcnt >= 1 && |
3e170ce0 | 2389 | clsp->sl_refcnt <= NBCLPG); |
6d2010ae A |
2390 | } |
2391 | if (cl_class == MC_16KCL) { | |
2d21ac55 | 2392 | int k; |
6d2010ae | 2393 | for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
2394 | nsp = nsp->sl_next; |
2395 | /* Next slab must already be present */ | |
2396 | VERIFY(nsp != NULL); | |
2397 | VERIFY(nsp->sl_refcnt == 1); | |
2398 | } | |
2399 | } | |
2400 | ||
2401 | /* | |
2402 | * If we're asked to purge, restore the actual mbuf using | |
2403 | * contents of the shadow structure (if auditing is enabled) | |
2404 | * and clear the EXTF_COMPOSITE flag from the mbuf, as we are | |
2405 | * about to free it and the attached cluster back into their caches. | |
2406 | */ | |
2407 | if (purged) { | |
2408 | /* Restore constructed mbuf fields */ | |
2409 | if (mclaudit != NULL) | |
2410 | mcl_audit_restore_mbuf(m, mca, TRUE); | |
2411 | ||
39037602 | 2412 | MEXT_MINREF(m) = 0; |
2d21ac55 | 2413 | MEXT_REF(m) = 0; |
39037602 | 2414 | MEXT_PREF(m) = 0; |
2d21ac55 | 2415 | MEXT_FLAGS(m) = 0; |
39037602 A |
2416 | MEXT_PRIV(m) = 0; |
2417 | MEXT_PMBUF(m) = NULL; | |
813fb2f6 | 2418 | MEXT_TOKEN(m) = 0; |
2d21ac55 | 2419 | |
813fb2f6 A |
2420 | rfa = (mcache_obj_t *)(void *)m_get_rfa(m); |
2421 | m_set_ext(m, NULL, NULL, NULL); | |
2d21ac55 A |
2422 | rfa->obj_next = ref_list; |
2423 | ref_list = rfa; | |
2d21ac55 A |
2424 | |
2425 | m->m_type = MT_FREE; | |
2426 | m->m_flags = m->m_len = 0; | |
2427 | m->m_next = m->m_nextpkt = NULL; | |
2428 | ||
2429 | /* Save mbuf fields and make auditing happy */ | |
2430 | if (mclaudit != NULL) | |
2431 | mcl_audit_mbuf(mca, o, FALSE, FALSE); | |
2432 | ||
2433 | VERIFY(m_total(class) > 0); | |
2434 | m_total(class)--; | |
2435 | ||
2436 | /* Free the mbuf */ | |
2437 | o->obj_next = NULL; | |
2438 | slab_free(MC_MBUF, o); | |
2439 | ||
2440 | /* And free the cluster */ | |
2441 | ((mcache_obj_t *)cl)->obj_next = NULL; | |
2442 | if (class == MC_MBUF_CL) | |
2443 | slab_free(MC_CL, cl); | |
2444 | else if (class == MC_MBUF_BIGCL) | |
2445 | slab_free(MC_BIGCL, cl); | |
2446 | else | |
2447 | slab_free(MC_16KCL, cl); | |
2448 | } | |
2449 | ||
2450 | ++num; | |
2451 | tail = o; | |
2452 | o = nexto; | |
2453 | } | |
2454 | ||
2455 | if (!purged) { | |
2456 | tail->obj_next = m_cobjlist(class); | |
2457 | m_cobjlist(class) = list; | |
2458 | m_infree(class) += num; | |
2459 | } else if (ref_list != NULL) { | |
2460 | mcache_free_ext(ref_cache, ref_list); | |
2461 | } | |
2462 | ||
2463 | return (num); | |
2464 | } | |
2465 | ||
2466 | /* | |
2467 | * Common allocator for composite objects called by the CPU cache layer | |
2468 | * during an allocation request whenever there is no available element in | |
2469 | * the bucket layer. It returns one or more composite elements from the | |
2470 | * appropriate global freelist. If the freelist is empty, it will attempt | |
2471 | * to obtain the rudimentary objects from their caches and construct them | |
2472 | * into composite mbuf + cluster objects. | |
2473 | */ | |
2474 | static unsigned int | |
2475 | mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed, | |
2476 | int wait) | |
2477 | { | |
2478 | mbuf_class_t class = (mbuf_class_t)arg; | |
6d2010ae | 2479 | mbuf_class_t cl_class = 0; |
2d21ac55 A |
2480 | unsigned int num = 0, cnum = 0, want = needed; |
2481 | mcache_obj_t *ref_list = NULL; | |
2482 | mcache_obj_t *mp_list = NULL; | |
2483 | mcache_obj_t *clp_list = NULL; | |
2484 | mcache_obj_t **list; | |
2485 | struct ext_ref *rfa; | |
2486 | struct mbuf *m; | |
2487 | void *cl; | |
2488 | ||
2489 | ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class)); | |
2490 | ASSERT(needed > 0); | |
2491 | ||
2492 | VERIFY(class != MC_MBUF_16KCL || njcl > 0); | |
2493 | ||
2494 | /* There should not be any slab for this class */ | |
2495 | VERIFY(m_slab_cnt(class) == 0 && | |
2496 | m_slablist(class).tqh_first == NULL && | |
2497 | m_slablist(class).tqh_last == NULL); | |
2498 | ||
2499 | lck_mtx_lock(mbuf_mlock); | |
2500 | ||
2501 | /* Try using the freelist first */ | |
2502 | num = cslab_alloc(class, plist, needed); | |
2503 | list = *plist; | |
2504 | if (num == needed) { | |
2505 | m_alloc_cnt(class) += num; | |
2506 | lck_mtx_unlock(mbuf_mlock); | |
2507 | return (needed); | |
2508 | } | |
2509 | ||
2510 | lck_mtx_unlock(mbuf_mlock); | |
2511 | ||
2512 | /* | |
2513 | * We could not satisfy the request using the freelist alone; | |
2514 | * allocate from the appropriate rudimentary caches and use | |
2515 | * whatever we can get to construct the composite objects. | |
2516 | */ | |
2517 | needed -= num; | |
2518 | ||
2519 | /* | |
2520 | * Mark these allocation requests as coming from a composite cache. | |
2521 | * Also, if the caller is willing to be blocked, mark the request | |
2522 | * with MCR_FAILOK such that we don't end up sleeping at the mbuf | |
2523 | * slab layer waiting for the individual object when one or more | |
2524 | * of the already-constructed composite objects are available. | |
2525 | */ | |
2526 | wait |= MCR_COMP; | |
2527 | if (!(wait & MCR_NOSLEEP)) | |
2528 | wait |= MCR_FAILOK; | |
2529 | ||
6d2010ae | 2530 | /* allocate mbufs */ |
2d21ac55 A |
2531 | needed = mcache_alloc_ext(m_cache(MC_MBUF), &mp_list, needed, wait); |
2532 | if (needed == 0) { | |
2533 | ASSERT(mp_list == NULL); | |
2534 | goto fail; | |
2535 | } | |
6d2010ae A |
2536 | |
2537 | /* allocate clusters */ | |
2538 | if (class == MC_MBUF_CL) { | |
2539 | cl_class = MC_CL; | |
2540 | } else if (class == MC_MBUF_BIGCL) { | |
2541 | cl_class = MC_BIGCL; | |
2542 | } else { | |
2543 | VERIFY(class == MC_MBUF_16KCL); | |
2544 | cl_class = MC_16KCL; | |
2545 | } | |
2546 | needed = mcache_alloc_ext(m_cache(cl_class), &clp_list, needed, wait); | |
2d21ac55 A |
2547 | if (needed == 0) { |
2548 | ASSERT(clp_list == NULL); | |
2549 | goto fail; | |
2550 | } | |
6d2010ae | 2551 | |
2d21ac55 A |
2552 | needed = mcache_alloc_ext(ref_cache, &ref_list, needed, wait); |
2553 | if (needed == 0) { | |
2554 | ASSERT(ref_list == NULL); | |
2555 | goto fail; | |
2556 | } | |
2557 | ||
2558 | /* | |
2559 | * By this time "needed" is MIN(mbuf, cluster, ref). Any | |
2560 | * leftovers will get freed accordingly before we return to the caller. | |
2561 | */ | |
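/*
 * For instance (hypothetical counts): if 8 mbufs were obtained but
 * the cluster cache could supply only 6, "needed" shrinks to 6, so
 * at most 6 ref structures are requested and 6 composites are
 * constructed below; the 2 surplus mbufs are returned to their
 * cache at the "fail" label.
 */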
2562 | for (cnum = 0; cnum < needed; cnum++) { | |
2563 | struct mbuf *ms; | |
2564 | ||
2565 | m = ms = (struct mbuf *)mp_list; | |
2566 | mp_list = mp_list->obj_next; | |
2567 | ||
2568 | cl = clp_list; | |
2569 | clp_list = clp_list->obj_next; | |
2570 | ((mcache_obj_t *)cl)->obj_next = NULL; | |
2571 | ||
2572 | rfa = (struct ext_ref *)ref_list; | |
2573 | ref_list = ref_list->obj_next; | |
316670eb | 2574 | ((mcache_obj_t *)(void *)rfa)->obj_next = NULL; |
2d21ac55 A |
2575 | |
2576 | /* | |
2577 | * If auditing is enabled, construct the shadow mbuf | |
2578 | * in the audit structure instead of in the actual one. | |
2579 | * mbuf_cslab_audit() will take care of restoring the | |
2580 | * contents after the integrity check. | |
2581 | */ | |
2582 | if (mclaudit != NULL) { | |
2583 | mcache_audit_t *mca, *cl_mca; | |
2d21ac55 A |
2584 | |
2585 | lck_mtx_lock(mbuf_mlock); | |
2586 | mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); | |
39236c6e | 2587 | ms = MCA_SAVED_MBUF_PTR(mca); |
3e170ce0 A |
2588 | cl_mca = mcl_audit_buf2mca(cl_class, |
2589 | (mcache_obj_t *)cl); | |
2d21ac55 A |
2590 | |
2591 | /* | |
2592 | * Pair them up. Note that this is done at the time | |
2593 | * the mbuf+cluster objects are constructed. This | |
2594 | * information should be treated as a "best effort" | |
2595 | * debugging hint, since more than one mbuf can refer | |
2596 | * to a cluster. In that case, the cluster might not | |
2597 | * be freed along with the mbuf it was paired with. | |
2598 | */ | |
2599 | mca->mca_uptr = cl_mca; | |
2600 | cl_mca->mca_uptr = mca; | |
2601 | ||
2602 | ASSERT(mca->mca_uflags & MB_SCVALID); | |
2603 | ASSERT(!(cl_mca->mca_uflags & MB_SCVALID)); | |
2604 | lck_mtx_unlock(mbuf_mlock); | |
2605 | ||
2606 | /* Technically, they are in the freelist */ | |
6d2010ae A |
2607 | if (mclverify) { |
2608 | size_t size; | |
2609 | ||
2610 | mcache_set_pattern(MCACHE_FREE_PATTERN, m, | |
2611 | m_maxsize(MC_MBUF)); | |
2612 | ||
2613 | if (class == MC_MBUF_CL) | |
2614 | size = m_maxsize(MC_CL); | |
2615 | else if (class == MC_MBUF_BIGCL) | |
2616 | size = m_maxsize(MC_BIGCL); | |
2617 | else | |
2618 | size = m_maxsize(MC_16KCL); | |
2619 | ||
2620 | mcache_set_pattern(MCACHE_FREE_PATTERN, cl, | |
2621 | size); | |
2622 | } | |
2d21ac55 A |
2623 | } |
2624 | ||
2625 | MBUF_INIT(ms, 0, MT_FREE); | |
2626 | if (class == MC_MBUF_16KCL) { | |
2627 | MBUF_16KCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE); | |
2628 | } else if (class == MC_MBUF_BIGCL) { | |
2629 | MBUF_BIGCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE); | |
2630 | } else { | |
2631 | MBUF_CL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE); | |
2632 | } | |
2633 | VERIFY(ms->m_flags == M_EXT); | |
813fb2f6 | 2634 | VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms)); |
2d21ac55 A |
2635 | |
2636 | *list = (mcache_obj_t *)m; | |
2637 | (*list)->obj_next = NULL; | |
2638 | list = *plist = &(*list)->obj_next; | |
2639 | } | |
2640 | ||
2641 | fail: | |
2642 | /* | |
2643 | * Free up what's left of the above. | |
2644 | */ | |
2645 | if (mp_list != NULL) | |
2646 | mcache_free_ext(m_cache(MC_MBUF), mp_list); | |
2647 | if (clp_list != NULL) | |
6d2010ae | 2648 | mcache_free_ext(m_cache(cl_class), clp_list); |
2d21ac55 A |
2649 | if (ref_list != NULL) |
2650 | mcache_free_ext(ref_cache, ref_list); | |
2651 | ||
2652 | lck_mtx_lock(mbuf_mlock); | |
2653 | if (num > 0 || cnum > 0) { | |
2654 | m_total(class) += cnum; | |
2655 | VERIFY(m_total(class) <= m_maxlimit(class)); | |
2656 | m_alloc_cnt(class) += num + cnum; | |
2657 | } | |
2658 | if ((num + cnum) < want) | |
2659 | m_fail_cnt(class) += (want - (num + cnum)); | |
2660 | lck_mtx_unlock(mbuf_mlock); | |
2661 | ||
2662 | return (num + cnum); | |
2663 | } | |
2664 | ||
2665 | /* | |
2666 | * Common de-allocator for composite objects called by the CPU cache | |
2667 | * layer when one or more elements need to be returned to the appropriate | |
2668 | * global freelist. | |
2669 | */ | |
2670 | static void | |
2671 | mbuf_cslab_free(void *arg, mcache_obj_t *list, int purged) | |
2672 | { | |
2673 | mbuf_class_t class = (mbuf_class_t)arg; | |
2674 | unsigned int num; | |
2675 | int w; | |
2676 | ||
2677 | ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class)); | |
2678 | ||
2679 | lck_mtx_lock(mbuf_mlock); | |
2680 | ||
2681 | num = cslab_free(class, list, purged); | |
2682 | m_free_cnt(class) += num; | |
2683 | ||
2684 | if ((w = mb_waiters) > 0) | |
2685 | mb_waiters = 0; | |
2686 | ||
2687 | lck_mtx_unlock(mbuf_mlock); | |
2688 | ||
2689 | if (w != 0) | |
2690 | wakeup(mb_waitchan); | |
2691 | } | |
2692 | ||
2693 | /* | |
2694 | * Common auditor for composite objects called by the CPU cache layer | |
2695 | * during an allocation or free request. For the former, this is called | |
2696 | * after the objects are obtained from either the bucket or slab layer | |
2697 | * and before they are returned to the caller. For the latter, this is | |
2698 | * called immediately during free and before placing the objects into | |
2699 | * the bucket or slab layer. | |
2700 | */ | |
2701 | static void | |
2702 | mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) | |
2703 | { | |
3e170ce0 | 2704 | mbuf_class_t class = (mbuf_class_t)arg, cl_class; |
2d21ac55 A |
2705 | mcache_audit_t *mca; |
2706 | struct mbuf *m, *ms; | |
2707 | mcl_slab_t *clsp, *nsp; | |
3e170ce0 | 2708 | size_t cl_size; |
2d21ac55 A |
2709 | void *cl; |
2710 | ||
2711 | ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class)); | |
3e170ce0 A |
2712 | if (class == MC_MBUF_CL) |
2713 | cl_class = MC_CL; | |
2714 | else if (class == MC_MBUF_BIGCL) | |
2715 | cl_class = MC_BIGCL; | |
2716 | else | |
2717 | cl_class = MC_16KCL; | |
2718 | cl_size = m_maxsize(cl_class); | |
2d21ac55 A |
2719 | |
2720 | while ((m = ms = (struct mbuf *)list) != NULL) { | |
2721 | lck_mtx_lock(mbuf_mlock); | |
2722 | /* Do the mbuf sanity checks and record its transaction */ | |
2723 | mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); | |
2724 | mcl_audit_mbuf(mca, m, TRUE, alloc); | |
6d2010ae | 2725 | if (mcltrace) |
39236c6e | 2726 | mcache_buffer_log(mca, m, m_cache(class), &mb_start); |
6d2010ae | 2727 | |
2d21ac55 A |
2728 | if (alloc) |
2729 | mca->mca_uflags |= MB_COMP_INUSE; | |
2730 | else | |
2731 | mca->mca_uflags &= ~MB_COMP_INUSE; | |
2732 | ||
2733 | /* | |
2734 | * Use the shadow mbuf in the audit structure if we are | |
2735 | * freeing, since the contents of the actual mbuf have been | |
2736 | * pattern-filled by the above call to mcl_audit_mbuf(). | |
2737 | */ | |
6d2010ae | 2738 | if (!alloc && mclverify) |
39236c6e | 2739 | ms = MCA_SAVED_MBUF_PTR(mca); |
2d21ac55 A |
2740 | |
2741 | /* Do the cluster sanity checks and record its transaction */ | |
2742 | cl = ms->m_ext.ext_buf; | |
2743 | clsp = slab_get(cl); | |
2744 | VERIFY(ms->m_flags == M_EXT && cl != NULL); | |
813fb2f6 | 2745 | VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms)); |
6d2010ae A |
2746 | if (class == MC_MBUF_CL) |
2747 | VERIFY(clsp->sl_refcnt >= 1 && | |
3e170ce0 | 2748 | clsp->sl_refcnt <= NCLPG); |
6d2010ae | 2749 | else |
3e170ce0 A |
2750 | VERIFY(clsp->sl_refcnt >= 1 && |
2751 | clsp->sl_refcnt <= NBCLPG); | |
6d2010ae A |
2752 | |
2753 | if (class == MC_MBUF_16KCL) { | |
2d21ac55 | 2754 | int k; |
6d2010ae | 2755 | for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
2756 | nsp = nsp->sl_next; |
2757 | /* Next slab must already be present */ | |
2758 | VERIFY(nsp != NULL); | |
2759 | VERIFY(nsp->sl_refcnt == 1); | |
2760 | } | |
2761 | } | |
2762 | ||
3e170ce0 A |
2763 | |
2764 | mca = mcl_audit_buf2mca(cl_class, cl); | |
2765 | mcl_audit_cluster(mca, cl, cl_size, alloc, FALSE); | |
6d2010ae | 2766 | if (mcltrace) |
39236c6e | 2767 | mcache_buffer_log(mca, cl, m_cache(class), &mb_start); |
6d2010ae | 2768 | |
2d21ac55 A |
2769 | if (alloc) |
2770 | mca->mca_uflags |= MB_COMP_INUSE; | |
2771 | else | |
2772 | mca->mca_uflags &= ~MB_COMP_INUSE; | |
2773 | lck_mtx_unlock(mbuf_mlock); | |
2774 | ||
2775 | list = list->obj_next; | |
2776 | } | |
2777 | } | |
2778 | ||
2779 | /* | |
2780 | * Allocate some number of mbuf clusters and place on cluster freelist. | |
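 * Returns the number of clusters created. When nothing new was
 * allocated (non-blocking caller, or no growth was needed), returns 1
 * if at least "num" clusters are already free and 0 otherwise.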
2781 | */ | |
2782 | static int | |
2783 | m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize) | |
2784 | { | |
3e170ce0 | 2785 | int i, count = 0; |
2d21ac55 | 2786 | vm_size_t size = 0; |
3e170ce0 | 2787 | int numpages = 0, large_buffer; |
2d21ac55 A |
2788 | vm_offset_t page = 0; |
2789 | mcache_audit_t *mca_list = NULL; | |
2790 | mcache_obj_t *con_list = NULL; | |
2791 | mcl_slab_t *sp; | |
3e170ce0 | 2792 | mbuf_class_t class; |
2d21ac55 | 2793 | |
3e170ce0 A |
2794 | /* Set if a buffer allocation requires multiple pages */ | |
2795 | large_buffer = ((bufsize == m_maxsize(MC_16KCL)) && | |
2796 | PAGE_SIZE < M16KCLBYTES); | |
6d2010ae A |
2797 | VERIFY(bufsize == m_maxsize(MC_BIGCL) || |
2798 | bufsize == m_maxsize(MC_16KCL)); | |
2d21ac55 | 2799 | |
3e170ce0 A |
2800 | VERIFY((bufsize == PAGE_SIZE) || |
2801 | (bufsize > PAGE_SIZE && bufsize == m_maxsize(MC_16KCL))); | |
2802 | ||
2803 | if (bufsize == m_size(MC_BIGCL)) | |
2804 | class = MC_BIGCL; | |
2805 | else | |
2806 | class = MC_16KCL; | |
2807 | ||
2d21ac55 A |
2808 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); |
2809 | ||
2810 | /* | |
2811 | * Multiple threads may attempt to populate the cluster map one | |
2812 | * after another. Since we drop the lock below prior to acquiring | |
2813 | * the physical page(s), our view of the cluster map may no longer | |
2814 | * be accurate, and we could end up over-committing the pages beyond | |
2815 | * the maximum allowed for each class. To prevent this, the entire | |
2816 | * operation (including the page mapping) is serialized. | |
2817 | */ | |
2818 | while (mb_clalloc_busy) { | |
2819 | mb_clalloc_waiters++; | |
2820 | (void) msleep(mb_clalloc_waitchan, mbuf_mlock, | |
2821 | (PZERO-1), "m_clalloc", NULL); | |
2822 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2823 | } | |
2824 | ||
2825 | /* We are busy now; tell everyone else to go away */ | |
2826 | mb_clalloc_busy = TRUE; | |
2827 | ||
2828 | /* | |
2829 | * Honor the caller's wish to block or not block. We have a way | |
2830 | * to grow the pool asynchronously using the mbuf worker thread. | |
2831 | */ | |
2832 | i = m_howmany(num, bufsize); | |
2833 | if (i == 0 || (wait & M_DONTWAIT)) | |
2834 | goto out; | |
2835 | ||
2836 | lck_mtx_unlock(mbuf_mlock); | |
2837 | ||
b0d623f7 A |
2838 | size = round_page(i * bufsize); |
2839 | page = kmem_mb_alloc(mb_map, size, large_buffer); | |
2840 | ||
2841 | /* | |
6d2010ae | 2842 | * If we asked for "n" 16KB physically contiguous chunks | |
b0d623f7 A |
2843 | * and didn't get them, try again without the contiguity | |
2844 | * restriction. | |
2845 | */ | |
2846 | if (large_buffer && page == 0) | |
2847 | page = kmem_mb_alloc(mb_map, size, 0); | |
2d21ac55 A |
2848 | |
2849 | if (page == 0) { | |
6d2010ae | 2850 | if (bufsize == m_maxsize(MC_BIGCL)) { |
3e170ce0 A |
2851 | /* Try for 1 page if failed */ |
2852 | size = PAGE_SIZE; | |
b0d623f7 | 2853 | page = kmem_mb_alloc(mb_map, size, 0); |
2d21ac55 A |
2854 | } |
2855 | ||
2856 | if (page == 0) { | |
2857 | lck_mtx_lock(mbuf_mlock); | |
2858 | goto out; | |
2859 | } | |
2860 | } | |
2861 | ||
3e170ce0 A |
2862 | VERIFY(IS_P2ALIGNED(page, PAGE_SIZE)); |
2863 | numpages = size / PAGE_SIZE; | |
2d21ac55 A |
2864 | |
2865 | /* If auditing is enabled, allocate the audit structures now */ | |
2866 | if (mclaudit != NULL) { | |
2867 | int needed; | |
2868 | ||
2869 | /* | |
2870 | * Yes, I realize this is a waste of memory for clusters | |
2871 | * that never get transformed into mbufs, as we may end | |
3e170ce0 | 2872 | * up with NMBPG-1 unused audit structures per cluster. |
2d21ac55 A |
2873 | * But doing so tremendously simplifies the allocation |
2874 | * strategy, since at this point we are not holding the | |
6d2010ae | 2875 | * mbuf lock and the caller is okay to be blocked. |
2d21ac55 | 2876 | */ |
3e170ce0 A |
2877 | if (bufsize == PAGE_SIZE) { |
2878 | needed = numpages * NMBPG; | |
2d21ac55 A |
2879 | |
2880 | i = mcache_alloc_ext(mcl_audit_con_cache, | |
2881 | &con_list, needed, MCR_SLEEP); | |
2882 | ||
2883 | VERIFY(con_list != NULL && i == needed); | |
2d21ac55 | 2884 | } else { |
3e170ce0 A |
2885 | /* |
2886 | * If multiple 4K pages are used for each 16K cluster, | |
39037602 A |
2887 | * we need only one audit structure per cluster. | |
2888 | */ | |
6d2010ae | 2889 | needed = numpages / NSLABSP16KB; |
2d21ac55 A |
2890 | } |
2891 | ||
2892 | i = mcache_alloc_ext(mcache_audit_cache, | |
2893 | (mcache_obj_t **)&mca_list, needed, MCR_SLEEP); | |
2894 | ||
2895 | VERIFY(mca_list != NULL && i == needed); | |
2896 | } | |
2897 | ||
2898 | lck_mtx_lock(mbuf_mlock); | |
2899 | ||
3e170ce0 A |
2900 | for (i = 0; i < numpages; i++, page += PAGE_SIZE) { |
2901 | ppnum_t offset = | |
2902 | ((unsigned char *)page - mbutl) >> PAGE_SHIFT; | |
99c3a104 | 2903 | ppnum_t new_page = pmap_find_phys(kernel_pmap, page); |
2d21ac55 A |
2904 | |
2905 | /* | |
3e170ce0 A |
2906 | * If there is a mapper, the appropriate I/O page is | |
2907 | * returned; zero out the page to discard its past | |
2908 | * contents to prevent exposing leftover kernel memory. | |
2d21ac55 | 2909 | */ |
b0d623f7 | 2910 | VERIFY(offset < mcl_pages); |
39236c6e | 2911 | if (mcl_paddr_base != 0) { |
3e170ce0 | 2912 | bzero((void *)(uintptr_t) page, PAGE_SIZE); |
39236c6e A |
2913 | new_page = IOMapperInsertPage(mcl_paddr_base, |
2914 | offset, new_page); | |
99c3a104 | 2915 | } |
39236c6e | 2916 | mcl_paddr[offset] = new_page; |
2d21ac55 A |
2917 | |
2918 | /* Pattern-fill this fresh page */ | |
6d2010ae | 2919 | if (mclverify) { |
2d21ac55 | 2920 | mcache_set_pattern(MCACHE_FREE_PATTERN, |
3e170ce0 | 2921 | (caddr_t)page, PAGE_SIZE); |
6d2010ae | 2922 | } |
3e170ce0 A |
2923 | if (bufsize == PAGE_SIZE) { |
2924 | mcache_obj_t *buf; | |
2d21ac55 | 2925 | /* One for the entire page */ |
3e170ce0 | 2926 | sp = slab_get((void *)page); |
6d2010ae | 2927 | if (mclaudit != NULL) { |
3e170ce0 A |
2928 | mcl_audit_init((void *)page, |
2929 | &mca_list, &con_list, | |
2930 | AUDIT_CONTENTS_SIZE, NMBPG); | |
6d2010ae | 2931 | } |
2d21ac55 | 2932 | VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0); |
3e170ce0 A |
2933 | slab_init(sp, class, SLF_MAPPED, (void *)page, |
2934 | (void *)page, PAGE_SIZE, 0, 1); | |
2935 | buf = (mcache_obj_t *)page; | |
2936 | buf->obj_next = NULL; | |
2d21ac55 | 2937 | |
2d21ac55 | 2938 | /* Insert this slab */ |
3e170ce0 A |
2939 | slab_insert(sp, class); |
2940 | ||
2941 | /* Update stats now since slab_get drops the lock */ | |
2942 | ++m_infree(class); | |
2943 | ++m_total(class); | |
2944 | VERIFY(m_total(class) <= m_maxlimit(class)); | |
2945 | if (class == MC_BIGCL) { | |
2946 | mbstat.m_bigclfree = m_infree(MC_BIGCL) + | |
2947 | m_infree(MC_MBUF_BIGCL); | |
2948 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
2949 | } | |
2950 | ++count; | |
2951 | } else if ((bufsize > PAGE_SIZE) && | |
2952 | (i % NSLABSP16KB) == 0) { | |
2d21ac55 A |
2953 | union m16kcluster *m16kcl = (union m16kcluster *)page; |
2954 | mcl_slab_t *nsp; | |
2955 | int k; | |
39037602 | 2956 | |
2d21ac55 A |
2957 | /* One for the entire 16KB */ |
2958 | sp = slab_get(m16kcl); | |
2959 | if (mclaudit != NULL) | |
2960 | mcl_audit_init(m16kcl, &mca_list, NULL, 0, 1); | |
2961 | ||
2962 | VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0); | |
2963 | slab_init(sp, MC_16KCL, SLF_MAPPED, | |
2964 | m16kcl, m16kcl, bufsize, 0, 1); | |
3e170ce0 | 2965 | m16kcl->m16kcl_next = NULL; |
2d21ac55 | 2966 | |
6d2010ae A |
2967 | /* |
2968 | * 2nd-Nth page's slab is part of the first one, | |
2969 | * where N is NSLABSP16KB. | |
2970 | */ | |
2971 | for (k = 1; k < NSLABSP16KB; k++) { | |
2972 | nsp = slab_get(((union mbigcluster *)page) + k); | |
2d21ac55 A |
2973 | VERIFY(nsp->sl_refcnt == 0 && |
2974 | nsp->sl_flags == 0); | |
2975 | slab_init(nsp, MC_16KCL, | |
2976 | SLF_MAPPED | SLF_PARTIAL, | |
2977 | m16kcl, NULL, 0, 0, 0); | |
2978 | } | |
2d21ac55 A |
2979 | /* Insert this slab */ |
2980 | slab_insert(sp, MC_16KCL); | |
2981 | ||
3e170ce0 A |
2982 | /* Update stats now since slab_get drops the lock */ |
2983 | ++m_infree(MC_16KCL); | |
2984 | ++m_total(MC_16KCL); | |
2d21ac55 | 2985 | VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL)); |
3e170ce0 | 2986 | ++count; |
2d21ac55 A |
2987 | } |
2988 | } | |
2989 | VERIFY(mca_list == NULL && con_list == NULL); | |
2990 | ||
3e170ce0 A |
2991 | if (!mb_peak_newreport && mbuf_report_usage(class)) |
2992 | mb_peak_newreport = TRUE; | |
2993 | ||
2d21ac55 A |
2994 | /* We're done; let others enter */ |
2995 | mb_clalloc_busy = FALSE; | |
2996 | if (mb_clalloc_waiters > 0) { | |
2997 | mb_clalloc_waiters = 0; | |
2998 | wakeup(mb_clalloc_waitchan); | |
2999 | } | |
3000 | ||
3e170ce0 | 3001 | return (count); |
2d21ac55 A |
3002 | out: |
3003 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
3004 | ||
3005 | /* We're done; let others enter */ | |
3006 | mb_clalloc_busy = FALSE; | |
3007 | if (mb_clalloc_waiters > 0) { | |
3008 | mb_clalloc_waiters = 0; | |
3009 | wakeup(mb_clalloc_waitchan); | |
3010 | } | |
3011 | ||
3012 | /* | |
3013 | * When non-blocking, we kick the worker thread if we had to grow | |
3014 | * the pool or if the number of free clusters is less than requested. | |
3015 | */ | |
39037602 A |
3016 | if (i > 0 && mbuf_worker_ready && mbuf_worker_needs_wakeup) { |
3017 | wakeup((caddr_t)&mbuf_worker_needs_wakeup); | |
3018 | mbuf_worker_needs_wakeup = FALSE; | |
3019 | } | |
3e170ce0 | 3020 | if (class == MC_BIGCL) { |
2d21ac55 A |
3021 | if (i > 0) { |
3022 | /* | |
3023 | * Remember total number of 4KB clusters needed | |
3024 | * at this time. | |
3025 | */ | |
3026 | i += m_total(MC_BIGCL); | |
3027 | if (i > mbuf_expand_big) { | |
3028 | mbuf_expand_big = i; | |
2d21ac55 A |
3029 | } |
3030 | } | |
2d21ac55 A |
3031 | if (m_infree(MC_BIGCL) >= num) |
3032 | return (1); | |
3033 | } else { | |
3034 | if (i > 0) { | |
3035 | /* | |
3036 | * Remember total number of 16KB clusters needed | |
3037 | * at this time. | |
3038 | */ | |
3039 | i += m_total(MC_16KCL); | |
3040 | if (i > mbuf_expand_16k) { | |
3041 | mbuf_expand_16k = i; | |
2d21ac55 A |
3042 | } |
3043 | } | |
2d21ac55 A |
3044 | if (m_infree(MC_16KCL) >= num) |
3045 | return (1); | |
3046 | } | |
3047 | return (0); | |
3048 | } | |
3049 | ||
3050 | /* | |
3051 | * Populate the global freelist of the corresponding buffer class. | |
3052 | */ | |
3053 | static int | |
3054 | freelist_populate(mbuf_class_t class, unsigned int num, int wait) | |
3055 | { | |
3056 | mcache_obj_t *o = NULL; | |
6d2010ae | 3057 | int i, numpages = 0, count; |
3e170ce0 | 3058 | mbuf_class_t super_class; |
2d21ac55 A |
3059 | |
3060 | VERIFY(class == MC_MBUF || class == MC_CL || class == MC_BIGCL || | |
3061 | class == MC_16KCL); | |
3062 | ||
2d21ac55 A |
3063 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); |
3064 | ||
3e170ce0 A |
3065 | VERIFY(PAGE_SIZE == m_maxsize(MC_BIGCL) || |
3066 | PAGE_SIZE == m_maxsize(MC_16KCL)); | |
2d21ac55 | 3067 | |
3e170ce0 A |
3068 | if (m_maxsize(class) >= PAGE_SIZE) |
3069 | return(m_clalloc(num, wait, m_maxsize(class)) != 0); | |
2d21ac55 | 3070 | |
3e170ce0 A |
3071 | /* |
3072 | * The rest of the function allocates pages and slices | |
3073 | * them up into objects of the right size. | |
3074 | */ | |
2d21ac55 | 3075 | |
3e170ce0 A |
3076 | numpages = (num * m_size(class) + PAGE_SIZE - 1) / PAGE_SIZE; |
3077 | ||
3078 | /* Currently assume that pages are 4K or 16K */ | |
3079 | if (PAGE_SIZE == m_maxsize(MC_BIGCL)) | |
3080 | super_class = MC_BIGCL; | |
3081 | else | |
3082 | super_class = MC_16KCL; | |
2d21ac55 | 3083 | |
3e170ce0 A |
3084 | i = m_clalloc(numpages, wait, m_maxsize(super_class)); |
3085 | ||
3086 | /* Respect the minimum limit of the super class */ | |
3087 | if (m_total(super_class) == m_maxlimit(super_class) && | |
3088 | m_infree(super_class) <= m_minlimit(super_class)) | |
3089 | if (wait & MCR_COMP) | |
3090 | return (0); | |
6d2010ae A |
3091 | |
3092 | /* how many objects will we cut the page into? */ | |
3e170ce0 | 3093 | int numobj = PAGE_SIZE / m_maxsize(class); |
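/*
 * Worked example (assuming 4KB pages and the standard 256-byte MSIZE):
 * for class MC_MBUF, numobj is 16 (NMBPG); for MC_CL with 2KB
 * clusters it is 2, so each page carved up below yields that many
 * objects for the class freelist.
 */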
6d2010ae A |
3094 | |
3095 | for (count = 0; count < numpages; count++) { | |
6d2010ae | 3096 | /* respect totals, minlimit, maxlimit */ |
3e170ce0 | 3097 | if (m_total(super_class) <= m_minlimit(super_class) || |
6d2010ae A |
3098 | m_total(class) >= m_maxlimit(class)) |
3099 | break; | |
3100 | ||
3e170ce0 | 3101 | if ((o = slab_alloc(super_class, wait)) == NULL) |
6d2010ae A |
3102 | break; |
3103 | ||
2d21ac55 | 3104 | struct mbuf *m = (struct mbuf *)o; |
6d2010ae | 3105 | union mcluster *c = (union mcluster *)o; |
3e170ce0 | 3106 | union mbigcluster *mbc = (union mbigcluster *)o; |
2d21ac55 | 3107 | mcl_slab_t *sp = slab_get(o); |
6d2010ae | 3108 | mcache_audit_t *mca = NULL; |
2d21ac55 | 3109 | |
3e170ce0 A |
3110 | /* |
3111 | * since one full page will be converted to MC_MBUF or | |
3112 | * MC_CL, verify that the reference count will match that | |
3113 | * assumption | |
3114 | */ | |
39037602 | 3115 | VERIFY(sp->sl_refcnt == 1 && slab_is_detached(sp)); |
3e170ce0 | 3116 | VERIFY((sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED); |
6d2010ae A |
3117 | /* |
3118 | * Make sure that the cluster is unmolested | |
3119 | * while in freelist | |
3120 | */ | |
3121 | if (mclverify) { | |
3e170ce0 A |
3122 | mca = mcl_audit_buf2mca(super_class, |
3123 | (mcache_obj_t *)o); | |
3124 | mcache_audit_free_verify(mca, | |
3125 | (mcache_obj_t *)o, 0, m_maxsize(super_class)); | |
2d21ac55 A |
3126 | } |
3127 | ||
3e170ce0 | 3128 | /* Reinitialize it as an mbuf or 2K or 4K slab */ |
6d2010ae | 3129 | slab_init(sp, class, sp->sl_flags, |
3e170ce0 | 3130 | sp->sl_base, NULL, PAGE_SIZE, 0, numobj); |
2d21ac55 | 3131 | |
2d21ac55 A |
3132 | VERIFY(sp->sl_head == NULL); |
3133 | ||
3e170ce0 A |
3134 | VERIFY(m_total(super_class) >= 1); |
3135 | m_total(super_class)--; | |
3136 | ||
3137 | if (super_class == MC_BIGCL) | |
3138 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
2d21ac55 | 3139 | |
6d2010ae A |
3140 | m_total(class) += numobj; |
3141 | m_infree(class) += numobj; | |
3142 | ||
fe8ab488 A |
3143 | if (!mb_peak_newreport && mbuf_report_usage(class)) |
3144 | mb_peak_newreport = TRUE; | |
6d2010ae A |
3145 | |
3146 | i = numobj; | |
3147 | if (class == MC_MBUF) { | |
3148 | mbstat.m_mbufs = m_total(MC_MBUF); | |
3e170ce0 | 3149 | mtype_stat_add(MT_FREE, NMBPG); |
6d2010ae A |
3150 | while (i--) { |
3151 | /* | |
3152 | * If auditing is enabled, construct the | |
3153 | * shadow mbuf in the audit structure | |
3154 | * instead of the actual one. | |
3155 | * mbuf_slab_audit() will take care of | |
3156 | * restoring the contents after the | |
3157 | * integrity check. | |
3158 | */ | |
3159 | if (mclaudit != NULL) { | |
3160 | struct mbuf *ms; | |
3161 | mca = mcl_audit_buf2mca(MC_MBUF, | |
3162 | (mcache_obj_t *)m); | |
39236c6e | 3163 | ms = MCA_SAVED_MBUF_PTR(mca); |
6d2010ae A |
3164 | ms->m_type = MT_FREE; |
3165 | } else { | |
3166 | m->m_type = MT_FREE; | |
3167 | } | |
3168 | m->m_next = sp->sl_head; | |
3169 | sp->sl_head = (void *)m++; | |
3170 | } | |
3e170ce0 | 3171 | } else if (class == MC_CL) { /* MC_CL */ |
6d2010ae A |
3172 | mbstat.m_clfree = |
3173 | m_infree(MC_CL) + m_infree(MC_MBUF_CL); | |
3174 | mbstat.m_clusters = m_total(MC_CL); | |
3175 | while (i--) { | |
3176 | c->mcl_next = sp->sl_head; | |
3177 | sp->sl_head = (void *)c++; | |
2d21ac55 | 3178 | } |
3e170ce0 A |
3179 | } else { |
3180 | VERIFY(class == MC_BIGCL); | |
3181 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
3182 | mbstat.m_bigclfree = m_infree(MC_BIGCL) + | |
3183 | m_infree(MC_MBUF_BIGCL); | |
3184 | while (i--) { | |
3185 | mbc->mbc_next = sp->sl_head; | |
3186 | sp->sl_head = (void *)mbc++; | |
3187 | } | |
2d21ac55 A |
3188 | } |
3189 | ||
3e170ce0 | 3190 | /* Insert into the mbuf or 2k or 4k slab list */ |
6d2010ae | 3191 | slab_insert(sp, class); |
2d21ac55 A |
3192 | |
3193 | if ((i = mb_waiters) > 0) | |
3194 | mb_waiters = 0; | |
3195 | if (i != 0) | |
3196 | wakeup(mb_waitchan); | |
2d21ac55 | 3197 | } |
6d2010ae A |
3198 | return (count != 0); |
3199 | } | |
2d21ac55 | 3200 | |
6d2010ae A |
3201 | /* |
3202 | * For each class, initialize the freelist to hold m_minlimit() objects. | |
3203 | */ | |
3204 | static void | |
3205 | freelist_init(mbuf_class_t class) | |
3206 | { | |
3207 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
3208 | ||
3209 | VERIFY(class == MC_CL || class == MC_BIGCL); | |
3210 | VERIFY(m_total(class) == 0); | |
3211 | VERIFY(m_minlimit(class) > 0); | |
3212 | ||
3213 | while (m_total(class) < m_minlimit(class)) | |
3214 | (void) freelist_populate(class, m_minlimit(class), M_WAIT); | |
3215 | ||
3216 | VERIFY(m_total(class) >= m_minlimit(class)); | |
2d21ac55 A |
3217 | } |
3218 | ||
3219 | /* | |
3220 | * (Inaccurately) check if it might be worth a trip back to the | |
3221 | * mcache layer due to the availability of objects there. We'll | |
3222 | * end up back here if there's nothing up there. | |
3223 | */ | |
3224 | static boolean_t | |
3225 | mbuf_cached_above(mbuf_class_t class, int wait) | |
3226 | { | |
3227 | switch (class) { | |
3228 | case MC_MBUF: | |
3229 | if (wait & MCR_COMP) | |
3230 | return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL)) || | |
3231 | !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL))); | |
3232 | break; | |
3233 | ||
3234 | case MC_CL: | |
3235 | if (wait & MCR_COMP) | |
3236 | return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL))); | |
3237 | break; | |
3238 | ||
3239 | case MC_BIGCL: | |
3240 | if (wait & MCR_COMP) | |
3241 | return (!mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL))); | |
3242 | break; | |
3243 | ||
3244 | case MC_16KCL: | |
3245 | if (wait & MCR_COMP) | |
3246 | return (!mcache_bkt_isempty(m_cache(MC_MBUF_16KCL))); | |
3247 | break; | |
3248 | ||
3249 | case MC_MBUF_CL: | |
3250 | case MC_MBUF_BIGCL: | |
3251 | case MC_MBUF_16KCL: | |
3252 | break; | |
3253 | ||
3254 | default: | |
3255 | VERIFY(0); | |
3256 | /* NOTREACHED */ | |
3257 | } | |
3258 | ||
3259 | return (!mcache_bkt_isempty(m_cache(class))); | |
3260 | } | |
3261 | ||
3262 | /* | |
3263 | * If possible, convert constructed objects to raw ones. | |
3264 | */ | |
3265 | static boolean_t | |
3266 | mbuf_steal(mbuf_class_t class, unsigned int num) | |
3267 | { | |
3268 | mcache_obj_t *top = NULL; | |
3269 | mcache_obj_t **list = ⊤ | |
3270 | unsigned int tot = 0; | |
3271 | ||
3272 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
3273 | ||
3274 | switch (class) { | |
3275 | case MC_MBUF: | |
3276 | case MC_CL: | |
3277 | case MC_BIGCL: | |
3278 | case MC_16KCL: | |
3279 | return (FALSE); | |
3280 | ||
3281 | case MC_MBUF_CL: | |
3282 | case MC_MBUF_BIGCL: | |
3283 | case MC_MBUF_16KCL: | |
3284 | /* Get the required number of constructed objects if possible */ | |
3285 | if (m_infree(class) > m_minlimit(class)) { | |
3286 | tot = cslab_alloc(class, &list, | |
3287 | MIN(num, m_infree(class))); | |
3288 | } | |
3289 | ||
3290 | /* And destroy them to get back the raw objects */ | |
3291 | if (top != NULL) | |
3292 | (void) cslab_free(class, top, 1); | |
3293 | break; | |
3294 | ||
3295 | default: | |
3296 | VERIFY(0); | |
3297 | /* NOTREACHED */ | |
3298 | } | |
3299 | ||
3300 | return (tot == num); | |
3301 | } | |
3302 | ||
3303 | static void | |
3304 | m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp) | |
3305 | { | |
3306 | int m, bmap = 0; | |
3307 | ||
3308 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
3309 | ||
3310 | VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL)); | |
3311 | VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL)); | |
3312 | VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL)); | |
3313 | ||
3314 | /* | |
3315 | * This logic can be made smarter; for now, simply mark | |
3316 | * all other related classes as potential victims. | |
3317 | */ | |
3318 | switch (class) { | |
3319 | case MC_MBUF: | |
3320 | m_wantpurge(MC_CL)++; | |
6d2010ae | 3321 | m_wantpurge(MC_BIGCL)++; |
2d21ac55 A |
3322 | m_wantpurge(MC_MBUF_CL)++; |
3323 | m_wantpurge(MC_MBUF_BIGCL)++; | |
3324 | break; | |
3325 | ||
3326 | case MC_CL: | |
3327 | m_wantpurge(MC_MBUF)++; | |
6d2010ae A |
3328 | m_wantpurge(MC_BIGCL)++; |
3329 | m_wantpurge(MC_MBUF_BIGCL)++; | |
2d21ac55 A |
3330 | if (!comp) |
3331 | m_wantpurge(MC_MBUF_CL)++; | |
3332 | break; | |
3333 | ||
3334 | case MC_BIGCL: | |
6d2010ae A |
3335 | m_wantpurge(MC_MBUF)++; |
3336 | m_wantpurge(MC_CL)++; | |
3337 | m_wantpurge(MC_MBUF_CL)++; | |
2d21ac55 A |
3338 | if (!comp) |
3339 | m_wantpurge(MC_MBUF_BIGCL)++; | |
3340 | break; | |
3341 | ||
3342 | case MC_16KCL: | |
3343 | if (!comp) | |
3344 | m_wantpurge(MC_MBUF_16KCL)++; | |
3345 | break; | |
3346 | ||
3347 | default: | |
3348 | VERIFY(0); | |
3349 | /* NOTREACHED */ | |
3350 | } | |
3351 | ||
3352 | /* | |
3353 | * Run through each marked class and check if we really need to | |
3354 | * purge (and therefore temporarily disable) the per-CPU cache | |
3355 | * layer used by the class. If so, remember the classes, since | |
3356 | * we are going to drop the lock below prior to purging. | |
3357 | */ | |
3358 | for (m = 0; m < NELEM(mbuf_table); m++) { | |
3359 | if (m_wantpurge(m) > 0) { | |
3360 | m_wantpurge(m) = 0; | |
3361 | /* | |
3362 | * Try hard to steal the required number of objects | |
3363 | * from the freelist of other mbuf classes. Only | |
3364 | * purge and disable the per-CPU cache layer when | |
3365 | * we don't have enough; it's the last resort. | |
3366 | */ | |
3367 | if (!mbuf_steal(m, num)) | |
3368 | bmap |= (1 << m); | |
3369 | } | |
3370 | } | |
3371 | ||
3372 | lck_mtx_unlock(mbuf_mlock); | |
3373 | ||
3374 | if (bmap != 0) { | |
39236c6e A |
3375 | /* signal the domains to drain */ |
3376 | net_drain_domains(); | |
2d21ac55 A |
3377 | |
3378 | /* Sigh; we have no other choice but to ask mcache to purge */ | |
3379 | for (m = 0; m < NELEM(mbuf_table); m++) { | |
3380 | if ((bmap & (1 << m)) && | |
fe8ab488 | 3381 | mcache_purge_cache(m_cache(m), TRUE)) { |
2d21ac55 A |
3382 | lck_mtx_lock(mbuf_mlock); |
3383 | m_purge_cnt(m)++; | |
3384 | mbstat.m_drain++; | |
3385 | lck_mtx_unlock(mbuf_mlock); | |
3386 | } | |
3387 | } | |
3388 | } else { | |
3389 | /* | |
3390 | * Request mcache to reap extra elements from all of its caches; | |
3391 | * note that all reaps are serialized and happen only at a fixed | |
3392 | * interval. | |
3393 | */ | |
3394 | mcache_reap(); | |
3395 | } | |
3396 | lck_mtx_lock(mbuf_mlock); | |
3397 | } | |
3398 | ||
3399 | static inline struct mbuf * | |
3400 | m_get_common(int wait, short type, int hdr) | |
3401 | { | |
3402 | struct mbuf *m; | |
3403 | int mcflags = MSLEEPF(wait); | |
3404 | ||
3405 | /* Is this due to a non-blocking retry? If so, then try harder */ | |
3406 | if (mcflags & MCR_NOSLEEP) | |
3407 | mcflags |= MCR_TRYHARD; | |
3408 | ||
3409 | m = mcache_alloc(m_cache(MC_MBUF), mcflags); | |
3410 | if (m != NULL) { | |
3411 | MBUF_INIT(m, hdr, type); | |
3412 | mtype_stat_inc(type); | |
3413 | mtype_stat_dec(MT_FREE); | |
3414 | #if CONFIG_MACF_NET | |
3415 | if (hdr && mac_init_mbuf(m, wait) != 0) { | |
3416 | m_free(m); | |
3417 | return (NULL); | |
3418 | } | |
3419 | #endif /* MAC_NET */ | |
3420 | } | |
3421 | return (m); | |
3422 | } | |
3423 | ||
3424 | /* | |
3425 | * Space allocation routines; these are also available as macros | |
3426 | * for critical paths. | |
3427 | */ | |
3428 | #define _M_GET(wait, type) m_get_common(wait, type, 0) | |
3429 | #define _M_GETHDR(wait, type) m_get_common(wait, type, 1) | |
3430 | #define _M_RETRY(wait, type) _M_GET(wait, type) | |
3431 | #define _M_RETRYHDR(wait, type) _M_GETHDR(wait, type) | |
3432 | #define _MGET(m, how, type) ((m) = _M_GET(how, type)) | |
3433 | #define _MGETHDR(m, how, type) ((m) = _M_GETHDR(how, type)) | |
3434 | ||
3435 | struct mbuf * | |
3436 | m_get(int wait, int type) | |
3437 | { | |
3438 | return (_M_GET(wait, type)); | |
3439 | } | |
3440 | ||
3441 | struct mbuf * | |
3442 | m_gethdr(int wait, int type) | |
3443 | { | |
3444 | return (_M_GETHDR(wait, type)); | |
3445 | } | |
3446 | ||
3447 | struct mbuf * | |
3448 | m_retry(int wait, int type) | |
3449 | { | |
3450 | return (_M_RETRY(wait, type)); | |
3451 | } | |
3452 | ||
3453 | struct mbuf * | |
3454 | m_retryhdr(int wait, int type) | |
3455 | { | |
3456 | return (_M_RETRYHDR(wait, type)); | |
3457 | } | |
3458 | ||
3459 | struct mbuf * | |
3460 | m_getclr(int wait, int type) | |
3461 | { | |
3462 | struct mbuf *m; | |
3463 | ||
3464 | _MGET(m, wait, type); | |
3465 | if (m != NULL) | |
3466 | bzero(MTOD(m, caddr_t), MLEN); | |
3467 | return (m); | |
3468 | } | |
3469 | ||
39037602 A |
3470 | static int |
3471 | m_free_paired(struct mbuf *m) | |
3472 | { | |
3473 | VERIFY((m->m_flags & M_EXT) && (MEXT_FLAGS(m) & EXTF_PAIRED)); | |
3474 | ||
3475 | membar_sync(); | |
3476 | if (MEXT_PMBUF(m) == m) { | |
3477 | volatile UInt16 *addr = (volatile UInt16 *)&MEXT_PREF(m); | |
3478 | int16_t oprefcnt, prefcnt; | |
3479 | ||
3480 | /* | |
3481 | * The paired reference count might go negative if we lose | |
3482 | * a race against another thread clearing MEXT_PMBUF after | |
3483 | * the memory barrier sync above. In that case, simply | |
3484 | * ignore it, as the pair has already been broken. | |
3485 | */ | |
3486 | do { | |
3487 | oprefcnt = *addr; | |
3488 | prefcnt = oprefcnt - 1; | |
3489 | } while (!OSCompareAndSwap16(oprefcnt, prefcnt, addr)); | |
3490 | ||
3491 | if (prefcnt > 1) { | |
3492 | return (1); | |
3493 | } else if (prefcnt == 1) { | |
813fb2f6 A |
3494 | (*(m_get_ext_free(m)))(m->m_ext.ext_buf, |
3495 | m->m_ext.ext_size, m_get_ext_arg(m)); | |
39037602 A |
3496 | return (1); |
3497 | } else if (prefcnt == 0) { | |
3498 | VERIFY(MBUF_IS_PAIRED(m)); | |
3499 | ||
3500 | /* | |
3501 | * Restore minref to its natural value, so that | |
3502 | * the caller will be able to free the cluster | |
3503 | * as appropriate. | |
3504 | */ | |
3505 | MEXT_MINREF(m) = 0; | |
3506 | ||
3507 | /* | |
3508 | * Clear MEXT_PMBUF, but leave EXTF_PAIRED intact | |
3509 | * as it is immutable. atomic_set_ptr also acts as a | |
3510 | * memory barrier. | |
3511 | */ | |
3512 | atomic_set_ptr(&MEXT_PMBUF(m), NULL); | |
3513 | ||
3514 | switch (m->m_ext.ext_size) { | |
3515 | case MCLBYTES: | |
813fb2f6 | 3516 | m_set_ext(m, m_get_rfa(m), NULL, NULL); |
39037602 A |
3517 | break; |
3518 | ||
3519 | case MBIGCLBYTES: | |
813fb2f6 | 3520 | m_set_ext(m, m_get_rfa(m), m_bigfree, NULL); |
39037602 A |
3521 | break; |
3522 | ||
3523 | case M16KCLBYTES: | |
813fb2f6 | 3524 | m_set_ext(m, m_get_rfa(m), m_16kfree, NULL); |
39037602 A |
3525 | break; |
3526 | ||
3527 | default: | |
3528 | VERIFY(0); | |
3529 | /* NOTREACHED */ | |
3530 | } | |
3531 | } | |
3532 | } | |
3533 | ||
3534 | /* | |
3535 | * Tell the caller that the unpair has occurred, and that the | |
3536 | * reference count on the external cluster held for the paired | |
3537 | * mbuf should now be dropped. | |
3538 | */ | |
3539 | return (0); | |
3540 | } | |
3541 | ||
2d21ac55 A |
3542 | struct mbuf * |
3543 | m_free(struct mbuf *m) | |
3544 | { | |
3545 | struct mbuf *n = m->m_next; | |
3546 | ||
3547 | if (m->m_type == MT_FREE) | |
3548 | panic("m_free: freeing an already freed mbuf"); | |
3549 | ||
2d21ac55 | 3550 | if (m->m_flags & M_PKTHDR) { |
39236c6e A |
3551 | /* Check for scratch area overflow */ |
3552 | m_redzone_verify(m); | |
3553 | /* Free the aux data and tags if there is any */ | |
2d21ac55 | 3554 | m_tag_delete_chain(m, NULL); |
39037602 A |
3555 | |
3556 | m_do_tx_compl_callback(m, NULL); | |
2d21ac55 A |
3557 | } |
3558 | ||
3559 | if (m->m_flags & M_EXT) { | |
39037602 | 3560 | u_int16_t refcnt; |
6d2010ae | 3561 | u_int32_t composite; |
813fb2f6 | 3562 | m_ext_free_func_t m_free_func; |
2d21ac55 | 3563 | |
39037602 A |
3564 | if (MBUF_IS_PAIRED(m) && m_free_paired(m)) |
3565 | return (n); | |
3566 | ||
2d21ac55 | 3567 | refcnt = m_decref(m); |
6d2010ae | 3568 | composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE); |
813fb2f6 | 3569 | m_free_func = m_get_ext_free(m); |
39037602 A |
3570 | |
3571 | if (refcnt == MEXT_MINREF(m) && !composite) { | |
813fb2f6 | 3572 | if (m_free_func == NULL) { |
2d21ac55 | 3573 | mcache_free(m_cache(MC_CL), m->m_ext.ext_buf); |
813fb2f6 | 3574 | } else if (m_free_func == m_bigfree) { |
2d21ac55 A |
3575 | mcache_free(m_cache(MC_BIGCL), |
3576 | m->m_ext.ext_buf); | |
813fb2f6 | 3577 | } else if (m_free_func == m_16kfree) { |
2d21ac55 A |
3578 | mcache_free(m_cache(MC_16KCL), |
3579 | m->m_ext.ext_buf); | |
3580 | } else { | |
813fb2f6 A |
3581 | (*m_free_func)(m->m_ext.ext_buf, |
3582 | m->m_ext.ext_size, m_get_ext_arg(m)); | |
2d21ac55 | 3583 | } |
813fb2f6 A |
3584 | mcache_free(ref_cache, m_get_rfa(m)); |
3585 | m_set_ext(m, NULL, NULL, NULL); | |
39037602 A |
3586 | } else if (refcnt == MEXT_MINREF(m) && composite) { |
3587 | VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED)); | |
2d21ac55 A |
3588 | VERIFY(m->m_type != MT_FREE); |
3589 | ||
3590 | mtype_stat_dec(m->m_type); | |
3591 | mtype_stat_inc(MT_FREE); | |
3592 | ||
3593 | m->m_type = MT_FREE; | |
3594 | m->m_flags = M_EXT; | |
3595 | m->m_len = 0; | |
3596 | m->m_next = m->m_nextpkt = NULL; | |
3597 | ||
6d2010ae A |
3598 | MEXT_FLAGS(m) &= ~EXTF_READONLY; |
3599 | ||
2d21ac55 | 3600 | /* "Free" into the intermediate cache */ |
813fb2f6 | 3601 | if (m_free_func == NULL) { |
2d21ac55 | 3602 | mcache_free(m_cache(MC_MBUF_CL), m); |
813fb2f6 | 3603 | } else if (m_free_func == m_bigfree) { |
2d21ac55 A |
3604 | mcache_free(m_cache(MC_MBUF_BIGCL), m); |
3605 | } else { | |
813fb2f6 | 3606 | VERIFY(m_free_func == m_16kfree); |
2d21ac55 A |
3607 | mcache_free(m_cache(MC_MBUF_16KCL), m); |
3608 | } | |
3609 | return (n); | |
3610 | } | |
3611 | } | |
3612 | ||
3613 | if (m->m_type != MT_FREE) { | |
3614 | mtype_stat_dec(m->m_type); | |
3615 | mtype_stat_inc(MT_FREE); | |
3616 | } | |
3617 | ||
3618 | m->m_type = MT_FREE; | |
3619 | m->m_flags = m->m_len = 0; | |
3620 | m->m_next = m->m_nextpkt = NULL; | |
3621 | ||
3622 | mcache_free(m_cache(MC_MBUF), m); | |
3623 | ||
3624 | return (n); | |
3625 | } | |
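/*
 * Usage sketch (hypothetical caller, for illustration only): m_free()
 * releases a single mbuf and returns its m_next link, so freeing an
 * entire chain is a simple walk; this is essentially what m_freem()
 * does for the caller.
 */
static void
example_free_chain(struct mbuf *m)
{
	while (m != NULL)
		m = m_free(m);	/* m_free() returns the next mbuf */
}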
3626 | ||
3627 | __private_extern__ struct mbuf * | |
3628 | m_clattach(struct mbuf *m, int type, caddr_t extbuf, | |
3629 | void (*extfree)(caddr_t, u_int, caddr_t), u_int extsize, caddr_t extarg, | |
39037602 | 3630 | int wait, int pair) |
2d21ac55 A |
3631 | { |
3632 | struct ext_ref *rfa = NULL; | |
3633 | ||
39037602 A |
3634 | /* |
3635 | * If pairing is requested and an existing mbuf is provided, reject | |
3636 | * it if it's already been paired to another cluster. Otherwise, | |
3637 | * allocate a new one, or free any existing cluster below. | |
3638 | */ | |
3639 | if ((m != NULL && MBUF_IS_PAIRED(m)) || | |
3640 | (m == NULL && (m = _M_GETHDR(wait, type)) == NULL)) | |
2d21ac55 A |
3641 | return (NULL); |
3642 | ||
3643 | if (m->m_flags & M_EXT) { | |
39037602 | 3644 | u_int16_t refcnt; |
6d2010ae | 3645 | u_int32_t composite; |
813fb2f6 | 3646 | m_ext_free_func_t m_free_func; |
2d21ac55 A |
3647 | |
3648 | refcnt = m_decref(m); | |
6d2010ae | 3649 | composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE); |
39037602 | 3650 | VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED) && MEXT_PMBUF(m) == NULL); |
813fb2f6 | 3651 | m_free_func = m_get_ext_free(m); |
39037602 | 3652 | if (refcnt == MEXT_MINREF(m) && !composite) { |
813fb2f6 | 3653 | if (m_free_func == NULL) { |
2d21ac55 | 3654 | mcache_free(m_cache(MC_CL), m->m_ext.ext_buf); |
813fb2f6 | 3655 | } else if (m_free_func == m_bigfree) { |
2d21ac55 A |
3656 | mcache_free(m_cache(MC_BIGCL), |
3657 | m->m_ext.ext_buf); | |
813fb2f6 | 3658 | } else if (m_free_func == m_16kfree) { |
2d21ac55 A |
3659 | mcache_free(m_cache(MC_16KCL), |
3660 | m->m_ext.ext_buf); | |
3661 | } else { | |
813fb2f6 A |
3662 | (*m_free_func)(m->m_ext.ext_buf, |
3663 | m->m_ext.ext_size, m_get_ext_arg(m)); | |
2d21ac55 A |
3664 | } |
3665 | /* Re-use the reference structure */ | |
813fb2f6 | 3666 | rfa = m_get_rfa(m); |
39037602 | 3667 | } else if (refcnt == MEXT_MINREF(m) && composite) { |
2d21ac55 A |
3668 | VERIFY(m->m_type != MT_FREE); |
3669 | ||
3670 | mtype_stat_dec(m->m_type); | |
3671 | mtype_stat_inc(MT_FREE); | |
3672 | ||
3673 | m->m_type = MT_FREE; | |
3674 | m->m_flags = M_EXT; | |
3675 | m->m_len = 0; | |
3676 | m->m_next = m->m_nextpkt = NULL; | |
6d2010ae A |
3677 | |
3678 | MEXT_FLAGS(m) &= ~EXTF_READONLY; | |
3679 | ||
2d21ac55 | 3680 | /* "Free" into the intermediate cache */ |
813fb2f6 | 3681 | if (m_free_func == NULL) { |
2d21ac55 | 3682 | mcache_free(m_cache(MC_MBUF_CL), m); |
813fb2f6 | 3683 | } else if (m_free_func == m_bigfree) { |
2d21ac55 A |
3684 | mcache_free(m_cache(MC_MBUF_BIGCL), m); |
3685 | } else { | |
813fb2f6 | 3686 | VERIFY(m_free_func == m_16kfree); |
2d21ac55 A |
3687 | mcache_free(m_cache(MC_MBUF_16KCL), m); |
3688 | } | |
3689 | /* | |
3690 | * Allocate a new mbuf, since we didn't divorce | |
3691 | * the composite mbuf + cluster pair above. | |
3692 | */ | |
3693 | if ((m = _M_GETHDR(wait, type)) == NULL) | |
3694 | return (NULL); | |
3695 | } | |
3696 | } | |
3697 | ||
3698 | if (rfa == NULL && | |
3699 | (rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) { | |
3700 | m_free(m); | |
3701 | return (NULL); | |
3702 | } | |
3703 | ||
39037602 A |
3704 | if (!pair) { |
3705 | MEXT_INIT(m, extbuf, extsize, extfree, extarg, rfa, | |
3706 | 0, 1, 0, 0, 0, NULL); | |
3707 | } else { | |
3708 | MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa, | |
3709 | 1, 1, 1, EXTF_PAIRED, 0, m); | |
3710 | } | |
2d21ac55 A |
3711 | |
3712 | return (m); | |
3713 | } | |
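/*
 * Usage sketch (hypothetical driver code; example_ext_free and the
 * _MALLOC()-backed buffer are assumptions, not part of this file):
 * attach an externally-owned buffer to a fresh packet-header mbuf,
 * supplying a free routine that runs once the last reference drops.
 */
static void
example_ext_free(caddr_t buf, u_int size, caddr_t arg)
{
#pragma unused(size, arg)
	_FREE(buf, M_TEMP);	/* assumes buf came from _MALLOC(..., M_TEMP, ...) */
}

static struct mbuf *
example_attach(caddr_t buf, u_int size)
{
	/* Passing a NULL mbuf makes m_clattach() allocate the header */
	return (m_clattach(NULL, MT_DATA, buf, example_ext_free, size,
	    NULL, M_WAIT, 0));
}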
3714 | ||
b0d623f7 A |
3715 | /* |
3716 | * Perform `fast' allocation of mbuf clusters from a cache of recently-freed | |
3717 | * clusters. (If the cache is empty, new clusters are allocated en masse.) | |
3718 | */ | |
3719 | struct mbuf * | |
3720 | m_getcl(int wait, int type, int flags) | |
3721 | { | |
3722 | struct mbuf *m; | |
3723 | int mcflags = MSLEEPF(wait); | |
3724 | int hdr = (flags & M_PKTHDR); | |
3725 | ||
3726 | /* Is this due to a non-blocking retry? If so, then try harder */ | |
3727 | if (mcflags & MCR_NOSLEEP) | |
3728 | mcflags |= MCR_TRYHARD; | |
3729 | ||
6d2010ae A |
3730 | m = mcache_alloc(m_cache(MC_MBUF_CL), mcflags); |
3731 | if (m != NULL) { | |
39037602 | 3732 | u_int16_t flag; |
6d2010ae A |
3733 | struct ext_ref *rfa; |
3734 | void *cl; | |
3735 | ||
3736 | VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT); | |
3737 | cl = m->m_ext.ext_buf; | |
813fb2f6 | 3738 | rfa = m_get_rfa(m); |
6d2010ae A |
3739 | |
3740 | ASSERT(cl != NULL && rfa != NULL); | |
813fb2f6 | 3741 | VERIFY(MBUF_IS_COMPOSITE(m) && m_get_ext_free(m) == NULL); |
6d2010ae A |
3742 | |
3743 | flag = MEXT_FLAGS(m); | |
3744 | ||
b0d623f7 | 3745 | MBUF_INIT(m, hdr, type); |
6d2010ae A |
3746 | MBUF_CL_INIT(m, cl, rfa, 1, flag); |
3747 | ||
b0d623f7 A |
3748 | mtype_stat_inc(type); |
3749 | mtype_stat_dec(MT_FREE); | |
3750 | #if CONFIG_MACF_NET | |
3751 | if (hdr && mac_init_mbuf(m, wait) != 0) { | |
6d2010ae | 3752 | m_freem(m); |
b0d623f7 A |
3753 | return (NULL); |
3754 | } | |
3755 | #endif /* MAC_NET */ | |
3756 | } | |
3757 | return (m); | |
3758 | } | |
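/*
 * Usage sketch (hypothetical caller): grab a packet-header mbuf with a
 * 2KB cluster already attached in a single cache operation, checking
 * for failure before touching the data area.
 */
static struct mbuf *
example_getcl(void)
{
	struct mbuf *m;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);	/* composite cache and backing pool exhausted */
	m->m_pkthdr.len = m->m_len = 0;	/* MCLBYTES of space available */
	return (m);
}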
3759 | ||
2d21ac55 A |
3760 | /* m_mclget() adds an mbuf cluster to a normal mbuf */ | |
3761 | struct mbuf * | |
3762 | m_mclget(struct mbuf *m, int wait) | |
3763 | { | |
3764 | struct ext_ref *rfa; | |
3765 | ||
3766 | if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) | |
3767 | return (m); | |
3768 | ||
3769 | m->m_ext.ext_buf = m_mclalloc(wait); | |
3770 | if (m->m_ext.ext_buf != NULL) { | |
3771 | MBUF_CL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0); | |
3772 | } else { | |
3773 | mcache_free(ref_cache, rfa); | |
3774 | } | |
3775 | return (m); | |
3776 | } | |
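/*
 * Usage sketch (hypothetical caller): the classic two-step pattern of
 * allocating a bare mbuf and then attaching a 2KB cluster. On failure
 * m_mclget() returns the mbuf unchanged, so M_EXT tells the caller
 * whether the attach succeeded.
 */
static struct mbuf *
example_mclget(void)
{
	struct mbuf *m = m_get(M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return (NULL);
	m = m_mclget(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_free(m);	/* cluster attach failed */
		return (NULL);
	}
	return (m);	/* m_mbigget()/m_m16kget() follow the same pattern */
}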
3777 | ||
3778 | /* Allocate an mbuf cluster */ | |
3779 | caddr_t | |
3780 | m_mclalloc(int wait) | |
3781 | { | |
3782 | int mcflags = MSLEEPF(wait); | |
3783 | ||
3784 | /* Is this due to a non-blocking retry? If so, then try harder */ | |
3785 | if (mcflags & MCR_NOSLEEP) | |
3786 | mcflags |= MCR_TRYHARD; | |
3787 | ||
3788 | return (mcache_alloc(m_cache(MC_CL), mcflags)); | |
3789 | } | |
3790 | ||
3791 | /* Free an mbuf cluster */ | |
3792 | void | |
3793 | m_mclfree(caddr_t p) | |
3794 | { | |
3795 | mcache_free(m_cache(MC_CL), p); | |
3796 | } | |
3797 | ||
3798 | /* | |
3799 | * m_mclhasreference() checks if a cluster of an mbuf is referenced by | |
6d2010ae | 3800 | * another mbuf; see comments in m_incref() regarding EXTF_READONLY. |
2d21ac55 A |
3801 | */ |
3802 | int | |
3803 | m_mclhasreference(struct mbuf *m) | |
3804 | { | |
3805 | if (!(m->m_flags & M_EXT)) | |
3806 | return (0); | |
9bccf70c | 3807 | |
813fb2f6 | 3808 | ASSERT(m_get_rfa(m) != NULL); |
2d21ac55 | 3809 | |
6d2010ae | 3810 | return ((MEXT_FLAGS(m) & EXTF_READONLY) ? 1 : 0); |
9bccf70c A |
3811 | } |
3812 | ||
2d21ac55 A |
3813 | __private_extern__ caddr_t |
3814 | m_bigalloc(int wait) | |
9bccf70c | 3815 | { |
2d21ac55 | 3816 | int mcflags = MSLEEPF(wait); |
91447636 | 3817 | |
2d21ac55 A |
3818 | /* Is this due to a non-blocking retry? If so, then try harder */ |
3819 | if (mcflags & MCR_NOSLEEP) | |
3820 | mcflags |= MCR_TRYHARD; | |
91447636 | 3821 | |
2d21ac55 | 3822 | return (mcache_alloc(m_cache(MC_BIGCL), mcflags)); |
9bccf70c A |
3823 | } |
3824 | ||
2d21ac55 A |
3825 | __private_extern__ void |
3826 | m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg) | |
9bccf70c | 3827 | { |
2d21ac55 | 3828 | mcache_free(m_cache(MC_BIGCL), p); |
9bccf70c A |
3829 | } |
3830 | ||
2d21ac55 A |
3831 | /* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */ | |
3832 | __private_extern__ struct mbuf * | |
3833 | m_mbigget(struct mbuf *m, int wait) | |
3834 | { | |
3835 | struct ext_ref *rfa; | |
3836 | ||
3837 | if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) | |
3838 | return (m); | |
3839 | ||
3840 | m->m_ext.ext_buf = m_bigalloc(wait); | |
3841 | if (m->m_ext.ext_buf != NULL) { | |
3842 | MBUF_BIGCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0); | |
91447636 | 3843 | } else { |
2d21ac55 | 3844 | mcache_free(ref_cache, rfa); |
91447636 | 3845 | } |
2d21ac55 A |
3846 | return (m); |
3847 | } | |
3848 | ||
3849 | __private_extern__ caddr_t | |
3850 | m_16kalloc(int wait) | |
3851 | { | |
3852 | int mcflags = MSLEEPF(wait); | |
3853 | ||
3854 | /* Is this due to a non-blocking retry? If so, then try harder */ | |
3855 | if (mcflags & MCR_NOSLEEP) | |
3856 | mcflags |= MCR_TRYHARD; | |
3857 | ||
3858 | return (mcache_alloc(m_cache(MC_16KCL), mcflags)); | |
91447636 A |
3859 | } |
3860 | ||
3861 | __private_extern__ void | |
2d21ac55 | 3862 | m_16kfree(caddr_t p, __unused u_int size, __unused caddr_t arg) |
91447636 | 3863 | { |
2d21ac55 | 3864 | mcache_free(m_cache(MC_16KCL), p); |
91447636 A |
3865 | } |
3866 | ||
2d21ac55 | 3867 | /* m_m16kget() adds a 16KB mbuf cluster to a normal mbuf */ | |
91447636 | 3868 | __private_extern__ struct mbuf * |
2d21ac55 | 3869 | m_m16kget(struct mbuf *m, int wait) |
91447636 | 3870 | { |
2d21ac55 A |
3871 | struct ext_ref *rfa; |
3872 | ||
3873 | if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) | |
3874 | return (m); | |
3875 | ||
3876 | m->m_ext.ext_buf = m_16kalloc(wait); | |
3877 | if (m->m_ext.ext_buf != NULL) { | |
3878 | MBUF_16KCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0); | |
3879 | } else { | |
3880 | mcache_free(ref_cache, rfa); | |
91447636 | 3881 | } |
2d21ac55 | 3882 | return (m); |
91447636 A |
3883 | } |
3884 | ||
b0d623f7 A |
3885 | /* |
3886 | * "Move" mbuf pkthdr from "from" to "to". | |
3887 | * "from" must have M_PKTHDR set, and "to" must be empty. | |
3888 | */ | |
9bccf70c | 3889 | void |
2d21ac55 | 3890 | m_copy_pkthdr(struct mbuf *to, struct mbuf *from) |
9bccf70c | 3891 | { |
39236c6e A |
3892 | VERIFY(from->m_flags & M_PKTHDR); |
3893 | ||
3894 | /* Check for scratch area overflow */ | |
3895 | m_redzone_verify(from); | |
3896 | ||
3897 | if (to->m_flags & M_PKTHDR) { | |
3898 | /* Check for scratch area overflow */ | |
3899 | m_redzone_verify(to); | |
3900 | /* We will be taking over the tags of 'to' */ | |
2d21ac55 | 3901 | m_tag_delete_chain(to, NULL); |
39236c6e | 3902 | } |
2d21ac55 | 3903 | to->m_pkthdr = from->m_pkthdr; /* especially tags */ |
39236c6e A |
3904 | m_classifier_init(from, 0); /* purge classifier info */ |
3905 | m_tag_init(from, 1); /* purge all tags from src */ | |
3906 | m_scratch_init(from); /* clear src scratch area */ | |
935ed37a A |
3907 | to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT); |
3908 | if ((to->m_flags & M_EXT) == 0) | |
3909 | to->m_data = to->m_pktdat; | |
39236c6e | 3910 | m_redzone_init(to); /* setup red zone on dst */ |
9bccf70c A |
3911 | } |
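/*
 * Note on semantics: after m_copy_pkthdr(to, from), ownership of the
 * tags, classifier info and scratch area has moved to "to", and "from"
 * is left with an empty tag chain; only "to" should be handed up the
 * stack afterwards. For a deep copy that leaves "from" intact, use
 * m_dup_pkthdr() below.
 */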
3912 | ||
91447636 A |
3913 | /* |
3914 | * Duplicate "from"'s mbuf pkthdr in "to". | |
3915 | * "from" must have M_PKTHDR set, and "to" must be empty. | |
3916 | * In particular, this does a deep copy of the packet tags. | |
3917 | */ | |
3a60a9f5 | 3918 | static int |
91447636 A |
3919 | m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how) |
3920 | { | |
39236c6e A |
3921 | VERIFY(from->m_flags & M_PKTHDR); |
3922 | ||
3923 | /* Check for scratch area overflow */ | |
3924 | m_redzone_verify(from); | |
3925 | ||
3926 | if (to->m_flags & M_PKTHDR) { | |
3927 | /* Check for scratch area overflow */ | |
3928 | m_redzone_verify(to); | |
3929 | /* We will be taking over the tags of 'to' */ | |
2d21ac55 | 3930 | m_tag_delete_chain(to, NULL); |
39236c6e | 3931 | } |
2d21ac55 A |
3932 | to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT); |
3933 | if ((to->m_flags & M_EXT) == 0) | |
3934 | to->m_data = to->m_pktdat; | |
3935 | to->m_pkthdr = from->m_pkthdr; | |
39236c6e A |
3936 | m_redzone_init(to); /* setup red zone on dst */ |
3937 | m_tag_init(to, 0); /* preserve dst static tags */ | |
2d21ac55 | 3938 | return (m_tag_copy_chain(to, from, how)); |
91447636 | 3939 | } |
fa4905b1 | 3940 | |
316670eb A |
3941 | void |
3942 | m_copy_pftag(struct mbuf *to, struct mbuf *from) | |
3943 | { | |
39037602 | 3944 | memcpy(m_pftag(to), m_pftag(from), sizeof(struct pf_mtag)); |
39236c6e | 3945 | #if PF_ECN |
39037602 A |
3946 | m_pftag(to)->pftag_hdr = NULL; |
3947 | m_pftag(to)->pftag_flags &= ~(PF_TAG_HDR_INET|PF_TAG_HDR_INET6); | |
39236c6e A |
3948 | #endif /* PF_ECN */ |
3949 | } | |
3950 | ||
3951 | void | |
3952 | m_classifier_init(struct mbuf *m, uint32_t pktf_mask) | |
3953 | { | |
3954 | VERIFY(m->m_flags & M_PKTHDR); | |
3955 | ||
3956 | m->m_pkthdr.pkt_proto = 0; | |
3957 | m->m_pkthdr.pkt_flowsrc = 0; | |
3958 | m->m_pkthdr.pkt_flowid = 0; | |
3959 | m->m_pkthdr.pkt_flags &= pktf_mask; /* caller-defined mask */ | |
3960 | /* preserve service class and interface info for loopback packets */ | |
3961 | if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP)) | |
3962 | (void) m_set_service_class(m, MBUF_SC_BE); | |
3963 | if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) | |
3964 | m->m_pkthdr.pkt_ifainfo = 0; | |
3965 | #if MEASURE_BW | |
3966 | m->m_pkthdr.pkt_bwseq = 0; | |
3967 | #endif /* MEASURE_BW */ | |
39037602 | 3968 | m->m_pkthdr.pkt_timestamp = 0; |
39236c6e A |
3969 | } |
3970 | ||
3971 | void | |
3972 | m_copy_classifier(struct mbuf *to, struct mbuf *from) | |
3973 | { | |
3974 | VERIFY(to->m_flags & M_PKTHDR); | |
3975 | VERIFY(from->m_flags & M_PKTHDR); | |
3976 | ||
3977 | to->m_pkthdr.pkt_proto = from->m_pkthdr.pkt_proto; | |
3978 | to->m_pkthdr.pkt_flowsrc = from->m_pkthdr.pkt_flowsrc; | |
3979 | to->m_pkthdr.pkt_flowid = from->m_pkthdr.pkt_flowid; | |
3980 | to->m_pkthdr.pkt_flags = from->m_pkthdr.pkt_flags; | |
3981 | (void) m_set_service_class(to, from->m_pkthdr.pkt_svc); | |
3982 | to->m_pkthdr.pkt_ifainfo = from->m_pkthdr.pkt_ifainfo; | |
39236c6e A |
3983 | #if MEASURE_BW |
3984 | to->m_pkthdr.pkt_bwseq = from->m_pkthdr.pkt_bwseq; | |
3985 | #endif /* MEASURE_BW */ | |
316670eb A |
3986 | } |
3987 | ||
9bccf70c | 3988 | /* |
2d21ac55 A |
3989 | * Return a list of mbuf hdrs that point to clusters. Try for num_needed; |
3990 | * if wantall is not set, return whatever number was available. Set up the | |
3991 | * first num_with_pkthdrs mbufs with headers configured as packet headers; these | |
3992 | * are chained on the m_nextpkt field. Any packets requested beyond this | |
3993 | * are chained onto the last packet header's m_next field. The size of | |
3994 | * the cluster is controlled by the parameter bufsize. | |
9bccf70c | 3995 | */ |
91447636 | 3996 | __private_extern__ struct mbuf * |
2d21ac55 A |
3997 | m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, |
3998 | int wait, int wantall, size_t bufsize) | |
fa4905b1 A |
3999 | { |
4000 | struct mbuf *m; | |
4001 | struct mbuf **np, *top; | |
2d21ac55 A |
4002 | unsigned int pnum, needed = *num_needed; |
4003 | mcache_obj_t *mp_list = NULL; | |
4004 | int mcflags = MSLEEPF(wait); | |
39037602 | 4005 | u_int16_t flag; |
2d21ac55 A |
4006 | struct ext_ref *rfa; |
4007 | mcache_t *cp; | |
4008 | void *cl; | |
4009 | ||
4010 | ASSERT(bufsize == m_maxsize(MC_CL) || | |
4011 | bufsize == m_maxsize(MC_BIGCL) || | |
4012 | bufsize == m_maxsize(MC_16KCL)); | |
4013 | ||
4014 | /* | |
4015 | * Caller must first check for njcl because this | |
4016 | * routine is internal and not exposed/used via KPI. | |
4017 | */ | |
4018 | VERIFY(bufsize != m_maxsize(MC_16KCL) || njcl > 0); | |
4019 | ||
fa4905b1 A |
4020 | top = NULL; |
4021 | np = ⊤ | |
2d21ac55 | 4022 | pnum = 0; |
fa4905b1 | 4023 | |
2d21ac55 A |
4024 | /* |
4025 | * The caller doesn't want all the requested buffers; only some. | |
4026 | * Try hard to get what we can, but don't block. This effectively | |
4027 | * overrides MCR_SLEEP, since this thread will not go to sleep | |
4028 | * if we can't get all the buffers. | |
4029 | */ | |
4030 | if (!wantall || (mcflags & MCR_NOSLEEP)) | |
4031 | mcflags |= MCR_TRYHARD; | |
4032 | ||
4033 | /* Allocate the composite mbuf + cluster elements from the cache */ | |
4034 | if (bufsize == m_maxsize(MC_CL)) | |
4035 | cp = m_cache(MC_MBUF_CL); | |
4036 | else if (bufsize == m_maxsize(MC_BIGCL)) | |
4037 | cp = m_cache(MC_MBUF_BIGCL); | |
4038 | else | |
4039 | cp = m_cache(MC_MBUF_16KCL); | |
4040 | needed = mcache_alloc_ext(cp, &mp_list, needed, mcflags); | |
4041 | ||
4042 | for (pnum = 0; pnum < needed; pnum++) { | |
4043 | m = (struct mbuf *)mp_list; | |
4044 | mp_list = mp_list->obj_next; | |
4045 | ||
4046 | VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT); | |
4047 | cl = m->m_ext.ext_buf; | |
813fb2f6 | 4048 | rfa = m_get_rfa(m); |
2d21ac55 A |
4049 | |
4050 | ASSERT(cl != NULL && rfa != NULL); | |
4051 | VERIFY(MBUF_IS_COMPOSITE(m)); | |
4052 | ||
4053 | flag = MEXT_FLAGS(m); | |
4054 | ||
4055 | MBUF_INIT(m, num_with_pkthdrs, MT_DATA); | |
4056 | if (bufsize == m_maxsize(MC_16KCL)) { | |
4057 | MBUF_16KCL_INIT(m, cl, rfa, 1, flag); | |
4058 | } else if (bufsize == m_maxsize(MC_BIGCL)) { | |
4059 | MBUF_BIGCL_INIT(m, cl, rfa, 1, flag); | |
91447636 | 4060 | } else { |
2d21ac55 A |
4061 | MBUF_CL_INIT(m, cl, rfa, 1, flag); |
4062 | } | |
4063 | ||
4064 | if (num_with_pkthdrs > 0) { | |
4065 | --num_with_pkthdrs; | |
4066 | #if CONFIG_MACF_NET | |
4067 | if (mac_mbuf_label_init(m, wait) != 0) { | |
6d2010ae | 4068 | m_freem(m); |
2d21ac55 | 4069 | break; |
91447636 | 4070 | } |
2d21ac55 | 4071 | #endif /* MAC_NET */ |
91447636 | 4072 | } |
2d21ac55 A |
4073 | |
4074 | *np = m; | |
4075 | if (num_with_pkthdrs > 0) | |
91447636 A |
4076 | np = &m->m_nextpkt; |
4077 | else | |
4078 | np = &m->m_next; | |
4079 | } | |
2d21ac55 A |
4080 | ASSERT(pnum != *num_needed || mp_list == NULL); |
4081 | if (mp_list != NULL) | |
4082 | mcache_free_ext(cp, mp_list); | |
4083 | ||
4084 | if (pnum > 0) { | |
4085 | mtype_stat_add(MT_DATA, pnum); | |
4086 | mtype_stat_sub(MT_FREE, pnum); | |
4087 | } | |
4088 | ||
4089 | if (wantall && (pnum != *num_needed)) { | |
4090 | if (top != NULL) | |
4091 | m_freem_list(top); | |
4092 | return (NULL); | |
91447636 | 4093 | } |
fa4905b1 | 4094 | |
316670eb A |
4095 | if (pnum > *num_needed) { |
4096 | printf("%s: File a radar related to <rdar://10146739>. \ | |
4097 | needed = %u, pnum = %u, num_needed = %u \n", | |
4098 | __func__, needed, pnum, *num_needed); | |
39037602 | 4099 | } |
316670eb | 4100 | |
2d21ac55 A |
4101 | *num_needed = pnum; |
4102 | return (top); | |
4103 | } | |
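/*
 * Usage sketch (hypothetical caller; the counts are illustrative): ask
 * for up to 16 packets, each a single mbuf header backed by a 2KB
 * cluster, accepting a short count under memory pressure.
 */
static struct mbuf *
example_getpackets(unsigned int *cnt)
{
	*cnt = 16;
	/* all 16 get pkthdrs; wantall == 0 allows a partial result */
	return (m_getpackets_internal(cnt, 16, M_DONTWAIT, 0, MCLBYTES));
}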
fa4905b1 | 4104 | |
91447636 | 4105 | /* |
2d21ac55 A |
4106 | * Return a list of mbufs linked by m_nextpkt. Try for numlist, and if | |
4107 | * wantall is not set, return whatever number was available. The size of | |
4108 | * each mbuf in the list is controlled by the parameter packetlen. Each | |
4109 | * mbuf of the list may have a chain of mbufs linked by m_next. Each mbuf | |
4110 | * in the chain is called a segment. If maxsegments is not null and the | |
4111 | * value pointed to is not null, this specifies the maximum number of | |
4112 | * segments for a chain of mbufs. If maxsegments is zero, or the value | |
4113 | * pointed to is zero, the caller has no restriction on the number of | |
4114 | * segments. The actual number of segments of an mbuf chain is returned | |
4115 | * in the value pointed to by maxsegments. | |
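 *
 * Usage sketch (hypothetical values): for up to 4 chains of 9000-byte
 * packets with no restriction on the segment count:
 *
 *	unsigned int cnt = 4, segs = 0;
 *	struct mbuf *m = m_allocpacket_internal(&cnt, 9000, &segs,
 *	    M_WAIT, 1, 0);
 *
 * On return, segs holds the actual number of segments per chain.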
91447636 | 4116 | */ |
91447636 | 4117 | __private_extern__ struct mbuf * |
2d21ac55 A |
4118 | m_allocpacket_internal(unsigned int *numlist, size_t packetlen, |
4119 | unsigned int *maxsegments, int wait, int wantall, size_t wantsize) | |
91447636 | 4120 | { |
2d21ac55 A |
4121 | struct mbuf **np, *top, *first = NULL; |
4122 | size_t bufsize, r_bufsize; | |
4123 | unsigned int num = 0; | |
4124 | unsigned int nsegs = 0; | |
4125 | unsigned int needed, resid; | |
4126 | int mcflags = MSLEEPF(wait); | |
4127 | mcache_obj_t *mp_list = NULL, *rmp_list = NULL; | |
4128 | mcache_t *cp = NULL, *rcp = NULL; | |
4129 | ||
4130 | if (*numlist == 0) | |
4131 | return (NULL); | |
fa4905b1 | 4132 | |
91447636 A |
4133 | top = NULL; |
4134 | np = ⊤ | |
2d21ac55 | 4135 | |
91447636 | 4136 | if (wantsize == 0) { |
2d21ac55 | 4137 | if (packetlen <= MINCLSIZE) { |
91447636 | 4138 | bufsize = packetlen; |
2d21ac55 A |
4139 | } else if (packetlen > m_maxsize(MC_CL)) { |
4140 | /* Use 4KB if jumbo cluster pool isn't available */ | |
4141 | if (packetlen <= m_maxsize(MC_BIGCL) || njcl == 0) | |
4142 | bufsize = m_maxsize(MC_BIGCL); | |
4143 | else | |
4144 | bufsize = m_maxsize(MC_16KCL); | |
4145 | } else { | |
4146 | bufsize = m_maxsize(MC_CL); | |
4147 | } | |
4148 | } else if (wantsize == m_maxsize(MC_CL) || | |
4149 | wantsize == m_maxsize(MC_BIGCL) || | |
4150 | (wantsize == m_maxsize(MC_16KCL) && njcl > 0)) { | |
91447636 | 4151 | bufsize = wantsize; |
2d21ac55 A |
4152 | } else { |
4153 | return (NULL); | |
4154 | } | |
91447636 A |
4155 | |
4156 | if (bufsize <= MHLEN) { | |
2d21ac55 | 4157 | nsegs = 1; |
91447636 A |
4158 | } else if (bufsize <= MINCLSIZE) { |
4159 | if (maxsegments != NULL && *maxsegments == 1) { | |
2d21ac55 A |
4160 | bufsize = m_maxsize(MC_CL); |
4161 | nsegs = 1; | |
91447636 | 4162 | } else { |
2d21ac55 | 4163 | nsegs = 2; |
fa4905b1 | 4164 | } |
2d21ac55 A |
4165 | } else if (bufsize == m_maxsize(MC_16KCL)) { |
4166 | VERIFY(njcl > 0); | |
3e170ce0 | 4167 | nsegs = ((packetlen - 1) >> M16KCLSHIFT) + 1; |
2d21ac55 | 4168 | } else if (bufsize == m_maxsize(MC_BIGCL)) { |
3e170ce0 | 4169 | nsegs = ((packetlen - 1) >> MBIGCLSHIFT) + 1; |
91447636 | 4170 | } else { |
2d21ac55 | 4171 | nsegs = ((packetlen - 1) >> MCLSHIFT) + 1; |
91447636 A |
4172 | } |
4173 | if (maxsegments != NULL) { | |
2d21ac55 A |
4174 | if (*maxsegments && nsegs > *maxsegments) { |
4175 | *maxsegments = nsegs; | |
4176 | return (NULL); | |
91447636 | 4177 | } |
2d21ac55 | 4178 | *maxsegments = nsegs; |
91447636 | 4179 | } |
91447636 | 4180 | |
2d21ac55 A |
4181 | /* |
4182 | * The caller either doesn't want all the requested buffers, or can't
4183 | * block; try hard to get what we can, but don't block. This effectively
4184 | * overrides MCR_SLEEP, since this thread will not go to sleep | |
4185 | * if we can't get all the buffers. | |
4186 | */ | |
4187 | if (!wantall || (mcflags & MCR_NOSLEEP)) | |
4188 | mcflags |= MCR_TRYHARD; | |
4189 | ||
4190 | /* | |
4191 | * Simple case where all elements in the lists/chains are mbufs. | |
4192 | * Unless bufsize is greater than MHLEN, each segment chain is made | |
4193 | * up of exactly 1 mbuf. Otherwise, each segment chain is made up | |
4194 | * of 2 mbufs; the second one is used for the residual data, i.e. | |
4195 | * the remaining data that cannot fit into the first mbuf. | |
4196 | */ | |
4197 | if (bufsize <= MINCLSIZE) { | |
4198 | /* Allocate the elements in one shot from the mbuf cache */ | |
4199 | ASSERT(bufsize <= MHLEN || nsegs == 2); | |
4200 | cp = m_cache(MC_MBUF); | |
4201 | needed = mcache_alloc_ext(cp, &mp_list, | |
4202 | (*numlist) * nsegs, mcflags); | |
4203 | ||
4204 | /* | |
4205 | * The number of elements must be even if we are to use an | |
4206 | * mbuf (instead of a cluster) to store the residual data. | |
4207 | * If we couldn't allocate the requested number of mbufs, | |
4208 | * trim the number down (if it's odd) in order to avoid | |
4209 | * creating a partial segment chain. | |
4210 | */ | |
4211 | if (bufsize > MHLEN && (needed & 0x1)) | |
4212 | needed--; | |
91447636 | 4213 | |
2d21ac55 A |
4214 | while (num < needed) { |
4215 | struct mbuf *m; | |
91447636 | 4216 | |
2d21ac55 A |
4217 | m = (struct mbuf *)mp_list; |
4218 | mp_list = mp_list->obj_next; | |
4219 | ASSERT(m != NULL); | |
91447636 | 4220 | |
2d21ac55 A |
4221 | MBUF_INIT(m, 1, MT_DATA); |
4222 | #if CONFIG_MACF_NET | |
4223 | if (mac_init_mbuf(m, wait) != 0) { | |
4224 | m_free(m); | |
4225 | break; | |
91447636 | 4226 | } |
2d21ac55 A |
4227 | #endif /* MAC_NET */ |
4228 | num++; | |
4229 | if (bufsize > MHLEN) { | |
4230 | /* A second mbuf for this segment chain */ | |
4231 | m->m_next = (struct mbuf *)mp_list; | |
4232 | mp_list = mp_list->obj_next; | |
4233 | ASSERT(m->m_next != NULL); | |
4234 | ||
4235 | MBUF_INIT(m->m_next, 0, MT_DATA); | |
4236 | num++; | |
91447636 | 4237 | } |
2d21ac55 A |
4238 | *np = m; |
4239 | np = &m->m_nextpkt; | |
4240 | } | |
4241 | ASSERT(num != *numlist || mp_list == NULL); | |
4242 | ||
4243 | if (num > 0) { | |
4244 | mtype_stat_add(MT_DATA, num); | |
4245 | mtype_stat_sub(MT_FREE, num); | |
4246 | } | |
4247 | num /= nsegs; | |
4248 | ||
4249 | /* We've got them all; return to caller */ | |
4250 | if (num == *numlist) | |
4251 | return (top); | |
4252 | ||
4253 | goto fail; | |
4254 | } | |
4255 | ||
4256 | /* | |
4257 | * Complex cases where elements are made up of one or more composite | |
4258 | * mbufs + cluster, depending on packetlen. Each N-segment chain can | |
4259 | * be illustrated as follows: | |
4260 | * | |
4261 | * [mbuf + cluster 1] [mbuf + cluster 2] ... [mbuf + cluster N] | |
4262 | * | |
4263 | * Every composite mbuf + cluster element comes from the intermediate | |
4264 | * cache (either MC_MBUF_CL or MC_MBUF_BIGCL). For space efficiency, | |
4265 | * the last composite element will come from the MC_MBUF_CL cache, | |
4266 | * unless the residual data is larger than 2KB where we use the | |
4267 | * big cluster composite cache (MC_MBUF_BIGCL) instead. Residual | |
4268 | * data is defined as extra data beyond the first element that cannot | |
4269 | * fit into the previous element, i.e. there is no residual data if | |
4270 | * the chain only has 1 segment. | |
4271 | */ | |
4272 | r_bufsize = bufsize; | |
4273 | resid = packetlen > bufsize ? packetlen % bufsize : 0; | |
4274 | if (resid > 0) { | |
4275 | /* There is residual data; figure out the cluster size */ | |
4276 | if (wantsize == 0 && packetlen > MINCLSIZE) { | |
4277 | /* | |
4278 | * Caller didn't request that all of the segments | |
4279 | * in the chain use the same cluster size; pick the
4280 | * smallest cluster size that fits the residual data.
4281 | */ | |
4282 | if (njcl > 0 && resid > m_maxsize(MC_BIGCL)) | |
4283 | r_bufsize = m_maxsize(MC_16KCL); | |
4284 | else if (resid > m_maxsize(MC_CL)) | |
4285 | r_bufsize = m_maxsize(MC_BIGCL); | |
4286 | else | |
4287 | r_bufsize = m_maxsize(MC_CL); | |
4288 | } else { | |
4289 | /* Use the same cluster size as the other segments */ | |
4290 | resid = 0; | |
4291 | } | |
4292 | } | |
4293 | ||
4294 | needed = *numlist; | |
4295 | if (resid > 0) { | |
4296 | /* | |
4297 | * Attempt to allocate composite mbuf + cluster elements for | |
4298 | * the residual data in each chain; record the number of such | |
4299 | * elements that can be allocated so that we know how many | |
4300 | * segment chains we can afford to create. | |
4301 | */ | |
4302 | if (r_bufsize <= m_maxsize(MC_CL)) | |
4303 | rcp = m_cache(MC_MBUF_CL); | |
4304 | else if (r_bufsize <= m_maxsize(MC_BIGCL)) | |
4305 | rcp = m_cache(MC_MBUF_BIGCL); | |
4306 | else | |
4307 | rcp = m_cache(MC_MBUF_16KCL); | |
4308 | needed = mcache_alloc_ext(rcp, &rmp_list, *numlist, mcflags); | |
4309 | ||
4310 | if (needed == 0) | |
4311 | goto fail; | |
4312 | ||
4313 | /* This is temporarily reduced for calculation */ | |
4314 | ASSERT(nsegs > 1); | |
4315 | nsegs--; | |
4316 | } | |
4317 | ||
4318 | /* | |
4319 | * Attempt to allocate the rest of the composite mbuf + cluster | |
4320 | * elements for the number of segment chains that we need. | |
4321 | */ | |
4322 | if (bufsize <= m_maxsize(MC_CL)) | |
4323 | cp = m_cache(MC_MBUF_CL); | |
4324 | else if (bufsize <= m_maxsize(MC_BIGCL)) | |
4325 | cp = m_cache(MC_MBUF_BIGCL); | |
4326 | else | |
4327 | cp = m_cache(MC_MBUF_16KCL); | |
4328 | needed = mcache_alloc_ext(cp, &mp_list, needed * nsegs, mcflags); | |
4329 | ||
4330 | /* Round it down to avoid creating a partial segment chain */ | |
4331 | needed = (needed / nsegs) * nsegs; | |
4332 | if (needed == 0) | |
4333 | goto fail; | |
4334 | ||
4335 | if (resid > 0) { | |
4336 | /* | |
4337 | * We're about to construct the chain(s); take into account | |
4338 | * the number of segments we have created above to hold the | |
4339 | * residual data for each chain, as well as restore the | |
4340 | * original count of segments per chain. | |
4341 | */ | |
4342 | ASSERT(nsegs > 0); | |
4343 | needed += needed / nsegs; | |
4344 | nsegs++; | |
4345 | } | |
4346 | ||
4347 | for (;;) { | |
4348 | struct mbuf *m; | |
39037602 | 4349 | u_int16_t flag; |
2d21ac55 A |
4350 | struct ext_ref *rfa; |
4351 | void *cl; | |
4352 | int pkthdr; | |
813fb2f6 | 4353 | m_ext_free_func_t m_free_func; |
2d21ac55 A |
4354 | |
4355 | ++num; | |
4356 | if (nsegs == 1 || (num % nsegs) != 0 || resid == 0) { | |
4357 | m = (struct mbuf *)mp_list; | |
4358 | mp_list = mp_list->obj_next; | |
4359 | } else { | |
4360 | m = (struct mbuf *)rmp_list; | |
4361 | rmp_list = rmp_list->obj_next; | |
4362 | } | |
813fb2f6 | 4363 | m_free_func = m_get_ext_free(m); |
2d21ac55 A |
4364 | ASSERT(m != NULL); |
4365 | VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT); | |
813fb2f6 A |
4366 | VERIFY(m_free_func == NULL || m_free_func == m_bigfree || |
4367 | m_free_func == m_16kfree); | |
2d21ac55 A |
4368 | |
4369 | cl = m->m_ext.ext_buf; | |
813fb2f6 | 4370 | rfa = m_get_rfa(m); |
2d21ac55 A |
4371 | |
4372 | ASSERT(cl != NULL && rfa != NULL); | |
4373 | VERIFY(MBUF_IS_COMPOSITE(m)); | |
4374 | ||
4375 | flag = MEXT_FLAGS(m); | |
4376 | ||
4377 | pkthdr = (nsegs == 1 || (num % nsegs) == 1); | |
4378 | if (pkthdr) | |
4379 | first = m; | |
4380 | MBUF_INIT(m, pkthdr, MT_DATA); | |
813fb2f6 | 4381 | if (m_free_func == m_16kfree) { |
2d21ac55 | 4382 | MBUF_16KCL_INIT(m, cl, rfa, 1, flag); |
813fb2f6 | 4383 | } else if (m_free_func == m_bigfree) { |
2d21ac55 A |
4384 | MBUF_BIGCL_INIT(m, cl, rfa, 1, flag); |
4385 | } else { | |
4386 | MBUF_CL_INIT(m, cl, rfa, 1, flag); | |
4387 | } | |
4388 | #if CONFIG_MACF_NET | |
4389 | if (pkthdr && mac_init_mbuf(m, wait) != 0) { | |
4390 | --num; | |
6d2010ae | 4391 | m_freem(m); |
2d21ac55 | 4392 | break; |
91447636 | 4393 | } |
2d21ac55 A |
4394 | #endif /* MAC_NET */ |
4395 | ||
4396 | *np = m; | |
4397 | if ((num % nsegs) == 0) | |
4398 | np = &first->m_nextpkt; | |
4399 | else | |
4400 | np = &m->m_next; | |
4401 | ||
4402 | if (num == needed) | |
4403 | break; | |
4404 | } | |
4405 | ||
4406 | if (num > 0) { | |
4407 | mtype_stat_add(MT_DATA, num); | |
4408 | mtype_stat_sub(MT_FREE, num); | |
91447636 | 4409 | } |
2d21ac55 A |
4410 | |
4411 | num /= nsegs; | |
4412 | ||
4413 | /* We've got them all; return to caller */ | |
4414 | if (num == *numlist) { | |
4415 | ASSERT(mp_list == NULL && rmp_list == NULL); | |
4416 | return (top); | |
4417 | } | |
4418 | ||
91447636 | 4419 | fail: |
2d21ac55 A |
4420 | /* Free up what's left of the above */ |
4421 | if (mp_list != NULL) | |
4422 | mcache_free_ext(cp, mp_list); | |
4423 | if (rmp_list != NULL) | |
4424 | mcache_free_ext(rcp, rmp_list); | |
4425 | if (wantall && top != NULL) { | |
91447636 | 4426 | m_freem(top); |
2d21ac55 | 4427 | return (NULL); |
91447636 | 4428 | } |
2d21ac55 A |
4429 | *numlist = num; |
4430 | return (top); | |
91447636 | 4431 | } |
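/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * requesting a batch of 3000-byte packets.  With wantsize == 0 the
 * routine chooses the cluster size itself, and with wantall == 0 it
 * returns however many packets it could build.  All "example_" names
 * below are assumptions for illustration only.
 */
static void
example_alloc_packet_batch(void)
{
	unsigned int num = 8;		/* ask for 8 packets */
	unsigned int maxseg = 0;	/* 0: no limit on segments/chain */
	struct mbuf *list, *m;

	list = m_allocpacket_internal(&num, 3000, &maxseg,
	    M_DONTWAIT, 0 /* !wantall */, 0 /* auto bufsize */);
	if (list == NULL)
		return;			/* nothing available right now */

	/* num was updated to the count actually returned */
	for (m = list; m != NULL; m = m->m_nextpkt) {
		/* maxseg now reports the segments used per chain */
	}
	(void) m_freem_list(list);
}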
fa4905b1 | 4432 | |
2d21ac55 A |
4433 | /* |
4434 | * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
4435 | * packets on the receive ring.
91447636 A |
4436 | */ |
4437 | __private_extern__ struct mbuf * | |
2d21ac55 | 4438 | m_getpacket_how(int wait) |
91447636 A |
4439 | { |
4440 | unsigned int num_needed = 1; | |
2d21ac55 A |
4441 | |
4442 | return (m_getpackets_internal(&num_needed, 1, wait, 1, | |
4443 | m_maxsize(MC_CL))); | |
91447636 | 4444 | } |
fa4905b1 | 4445 | |
2d21ac55 A |
4446 | /* |
4447 | * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
4448 | * packets on the receive ring.
91447636 A |
4449 | */ |
4450 | struct mbuf * | |
4451 | m_getpacket(void) | |
4452 | { | |
4453 | unsigned int num_needed = 1; | |
9bccf70c | 4454 | |
2d21ac55 A |
4455 | return (m_getpackets_internal(&num_needed, 1, M_WAIT, 1, |
4456 | m_maxsize(MC_CL))); | |
91447636 | 4457 | } |
fa4905b1 | 4458 | |
91447636 | 4459 | /* |
2d21ac55 A |
4460 | * Return a list of mbuf hdrs that point to clusters. Try for num_needed; |
4461 | * if this can't be met, return whatever number were available. Set up the | |
4462 | * first num_with_pkthdrs with mbuf hdrs configured as packet headers. These | |
4463 | * are chained on the m_nextpkt field. Any packets requested beyond this are | |
4464 | * chained onto the last packet header's m_next field. | |
91447636 A |
4465 | */ |
4466 | struct mbuf * | |
4467 | m_getpackets(int num_needed, int num_with_pkthdrs, int how) | |
4468 | { | |
4469 | unsigned int n = num_needed; | |
fa4905b1 | 4470 | |
2d21ac55 A |
4471 | return (m_getpackets_internal(&n, num_with_pkthdrs, how, 0, |
4472 | m_maxsize(MC_CL))); | |
4473 | } | |
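/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * batch-allocating receive buffers with m_getpackets().  Requesting 32
 * cluster-backed mbufs, all with packet headers; since wantall is 0
 * internally, fewer than 32 may come back and the loop simply stops at
 * the end of the list.
 */
static void
example_rx_ring_refill(void)
{
	struct mbuf *m, *next, *list;

	list = m_getpackets(32, 32, M_DONTWAIT);
	for (m = list; m != NULL; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* post m to a (hypothetical) DMA ring slot here */
	}
}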
fa4905b1 | 4474 | |
9bccf70c | 4475 | /* |
2d21ac55 A |
4476 | * Return a list of mbuf hdrs set up as packet hdrs chained together |
4477 | * on the m_nextpkt field | |
9bccf70c | 4478 | */ |
fa4905b1 A |
4479 | struct mbuf * |
4480 | m_getpackethdrs(int num_needed, int how) | |
4481 | { | |
4482 | struct mbuf *m; | |
4483 | struct mbuf **np, *top; | |
4484 | ||
4485 | top = NULL; | |
4486 | np = ⊤ | |
4487 | ||
fa4905b1 | 4488 | while (num_needed--) { |
2d21ac55 A |
4489 | m = _M_RETRYHDR(how, MT_DATA); |
4490 | if (m == NULL) | |
4491 | break; | |
4492 | ||
4493 | *np = m; | |
4494 | np = &m->m_nextpkt; | |
4495 | } | |
fa4905b1 A |
4496 | |
4497 | return (top); | |
4498 | } | |
4499 | ||
2d21ac55 A |
4500 | /* |
4501 | * Free an mbuf list (m_nextpkt) while following m_next. Returns the count | |
4502 | * of packets freed. Used by the drivers.
1c79356b | 4503 | */ |
2d21ac55 A |
4504 | int |
4505 | m_freem_list(struct mbuf *m) | |
1c79356b A |
4506 | { |
4507 | struct mbuf *nextpkt; | |
2d21ac55 A |
4508 | mcache_obj_t *mp_list = NULL; |
4509 | mcache_obj_t *mcl_list = NULL; | |
4510 | mcache_obj_t *mbc_list = NULL; | |
4511 | mcache_obj_t *m16k_list = NULL; | |
4512 | mcache_obj_t *m_mcl_list = NULL; | |
4513 | mcache_obj_t *m_mbc_list = NULL; | |
4514 | mcache_obj_t *m_m16k_list = NULL; | |
4515 | mcache_obj_t *ref_list = NULL; | |
4516 | int pktcount = 0; | |
4517 | int mt_free = 0, mt_data = 0, mt_header = 0, mt_soname = 0, mt_tag = 0; | |
4518 | ||
4519 | while (m != NULL) { | |
4520 | pktcount++; | |
4521 | ||
4522 | nextpkt = m->m_nextpkt; | |
4523 | m->m_nextpkt = NULL; | |
4524 | ||
4525 | while (m != NULL) { | |
4526 | struct mbuf *next = m->m_next; | |
4527 | mcache_obj_t *o, *rfa; | |
39037602 A |
4528 | u_int32_t composite; |
4529 | u_int16_t refcnt; | |
813fb2f6 | 4530 | m_ext_free_func_t m_free_func; |
fa4905b1 | 4531 | |
2d21ac55 A |
4532 | if (m->m_type == MT_FREE) |
4533 | panic("m_free: freeing an already freed mbuf"); | |
9bccf70c | 4534 | |
2d21ac55 | 4535 | if (m->m_flags & M_PKTHDR) { |
39236c6e A |
4536 | /* Check for scratch area overflow */ |
4537 | m_redzone_verify(m); | |
4538 | /* Free the aux data and tags if there is any */ | |
91447636 | 4539 | m_tag_delete_chain(m, NULL); |
91447636 | 4540 | } |
9bccf70c | 4541 | |
39037602 A |
4542 | if (!(m->m_flags & M_EXT)) { |
4543 | mt_free++; | |
2d21ac55 | 4544 | goto simple_free; |
39037602 A |
4545 | } |
4546 | ||
4547 | if (MBUF_IS_PAIRED(m) && m_free_paired(m)) { | |
4548 | m = next; | |
4549 | continue; | |
4550 | } | |
4551 | ||
4552 | mt_free++; | |
2d21ac55 | 4553 | |
316670eb | 4554 | o = (mcache_obj_t *)(void *)m->m_ext.ext_buf; |
2d21ac55 | 4555 | refcnt = m_decref(m); |
6d2010ae | 4556 | composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE); |
813fb2f6 | 4557 | m_free_func = m_get_ext_free(m); |
39037602 | 4558 | if (refcnt == MEXT_MINREF(m) && !composite) { |
813fb2f6 | 4559 | if (m_free_func == NULL) { |
2d21ac55 A |
4560 | o->obj_next = mcl_list; |
4561 | mcl_list = o; | |
813fb2f6 | 4562 | } else if (m_free_func == m_bigfree) { |
2d21ac55 A |
4563 | o->obj_next = mbc_list; |
4564 | mbc_list = o; | |
813fb2f6 | 4565 | } else if (m_free_func == m_16kfree) { |
2d21ac55 A |
4566 | o->obj_next = m16k_list; |
4567 | m16k_list = o; | |
4568 | } else { | |
813fb2f6 | 4569 | (*(m_free_func))((caddr_t)o, |
2d21ac55 | 4570 | m->m_ext.ext_size, |
813fb2f6 | 4571 | m_get_ext_arg(m)); |
2d21ac55 | 4572 | } |
813fb2f6 | 4573 | rfa = (mcache_obj_t *)(void *)m_get_rfa(m); |
2d21ac55 A |
4574 | rfa->obj_next = ref_list; |
4575 | ref_list = rfa; | |
813fb2f6 | 4576 | m_set_ext(m, NULL, NULL, NULL); |
39037602 A |
4577 | } else if (refcnt == MEXT_MINREF(m) && composite) { |
4578 | VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED)); | |
2d21ac55 A |
4579 | VERIFY(m->m_type != MT_FREE); |
4580 | /* | |
4581 | * Amortize the costs of atomic operations | |
4582 | * by doing them at the end, if possible. | |
4583 | */ | |
4584 | if (m->m_type == MT_DATA) | |
4585 | mt_data++; | |
4586 | else if (m->m_type == MT_HEADER) | |
4587 | mt_header++; | |
4588 | else if (m->m_type == MT_SONAME) | |
4589 | mt_soname++; | |
4590 | else if (m->m_type == MT_TAG) | |
4591 | mt_tag++; | |
4592 | else | |
4593 | mtype_stat_dec(m->m_type); | |
fa4905b1 | 4594 | |
2d21ac55 A |
4595 | m->m_type = MT_FREE; |
4596 | m->m_flags = M_EXT; | |
4597 | m->m_len = 0; | |
4598 | m->m_next = m->m_nextpkt = NULL; | |
4599 | ||
6d2010ae A |
4600 | MEXT_FLAGS(m) &= ~EXTF_READONLY; |
4601 | ||
2d21ac55 A |
4602 | /* "Free" into the intermediate cache */ |
4603 | o = (mcache_obj_t *)m; | |
813fb2f6 | 4604 | if (m_free_func == NULL) { |
2d21ac55 A |
4605 | o->obj_next = m_mcl_list; |
4606 | m_mcl_list = o; | |
813fb2f6 | 4607 | } else if (m_free_func == m_bigfree) { |
2d21ac55 A |
4608 | o->obj_next = m_mbc_list; |
4609 | m_mbc_list = o; | |
1c79356b | 4610 | } else { |
813fb2f6 | 4611 | VERIFY(m_free_func == m_16kfree); |
2d21ac55 A |
4612 | o->obj_next = m_m16k_list; |
4613 | m_m16k_list = o; | |
1c79356b | 4614 | } |
2d21ac55 A |
4615 | m = next; |
4616 | continue; | |
1c79356b | 4617 | } |
2d21ac55 A |
4618 | simple_free: |
4619 | /* | |
4620 | * Amortize the costs of atomic operations | |
4621 | * by doing them at the end, if possible. | |
4622 | */ | |
4623 | if (m->m_type == MT_DATA) | |
4624 | mt_data++; | |
4625 | else if (m->m_type == MT_HEADER) | |
4626 | mt_header++; | |
4627 | else if (m->m_type == MT_SONAME) | |
4628 | mt_soname++; | |
4629 | else if (m->m_type == MT_TAG) | |
4630 | mt_tag++; | |
4631 | else if (m->m_type != MT_FREE) | |
4632 | mtype_stat_dec(m->m_type); | |
4633 | ||
1c79356b | 4634 | m->m_type = MT_FREE; |
2d21ac55 A |
4635 | m->m_flags = m->m_len = 0; |
4636 | m->m_next = m->m_nextpkt = NULL; | |
fa4905b1 | 4637 | |
2d21ac55 A |
4638 | ((mcache_obj_t *)m)->obj_next = mp_list; |
4639 | mp_list = (mcache_obj_t *)m; | |
4640 | ||
4641 | m = next; | |
4642 | } | |
fa4905b1 | 4643 | |
2d21ac55 A |
4644 | m = nextpkt; |
4645 | } | |
fa4905b1 | 4646 | |
2d21ac55 A |
4647 | if (mt_free > 0) |
4648 | mtype_stat_add(MT_FREE, mt_free); | |
4649 | if (mt_data > 0) | |
4650 | mtype_stat_sub(MT_DATA, mt_data); | |
4651 | if (mt_header > 0) | |
4652 | mtype_stat_sub(MT_HEADER, mt_header); | |
4653 | if (mt_soname > 0) | |
4654 | mtype_stat_sub(MT_SONAME, mt_soname); | |
4655 | if (mt_tag > 0) | |
4656 | mtype_stat_sub(MT_TAG, mt_tag); | |
4657 | ||
4658 | if (mp_list != NULL) | |
4659 | mcache_free_ext(m_cache(MC_MBUF), mp_list); | |
4660 | if (mcl_list != NULL) | |
4661 | mcache_free_ext(m_cache(MC_CL), mcl_list); | |
4662 | if (mbc_list != NULL) | |
4663 | mcache_free_ext(m_cache(MC_BIGCL), mbc_list); | |
4664 | if (m16k_list != NULL) | |
4665 | mcache_free_ext(m_cache(MC_16KCL), m16k_list); | |
4666 | if (m_mcl_list != NULL) | |
4667 | mcache_free_ext(m_cache(MC_MBUF_CL), m_mcl_list); | |
4668 | if (m_mbc_list != NULL) | |
4669 | mcache_free_ext(m_cache(MC_MBUF_BIGCL), m_mbc_list); | |
4670 | if (m_m16k_list != NULL) | |
4671 | mcache_free_ext(m_cache(MC_MBUF_16KCL), m_m16k_list); | |
4672 | if (ref_list != NULL) | |
4673 | mcache_free_ext(ref_cache, ref_list); | |
4674 | ||
4675 | return (pktcount); | |
1c79356b A |
4676 | } |
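/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * transmit-completion path freeing a whole batch in one call, letting
 * m_freem_list() batch the cache frees and mtype statistics updates
 * instead of paying for them once per mbuf.
 */
static void
example_tx_complete(struct mbuf *done_list)
{
	int pkts;

	/* done_list: packets linked by m_nextpkt, each possibly a chain */
	pkts = m_freem_list(done_list);
	/* pkts is the number of packets (not mbufs) that were freed */
	(void) pkts;
}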
4677 | ||
4678 | void | |
2d21ac55 | 4679 | m_freem(struct mbuf *m) |
1c79356b | 4680 | { |
2d21ac55 | 4681 | while (m != NULL) |
1c79356b A |
4682 | m = m_free(m); |
4683 | } | |
4684 | ||
4685 | /* | |
4686 | * Mbuf utility routines.
4687 | */ | |
2d21ac55 | 4688 | |
1c79356b | 4689 | /* |
2d21ac55 A |
4690 | * Compute the amount of space available before the current start |
4691 | * of data in an mbuf. | |
1c79356b | 4692 | */ |
91447636 | 4693 | int |
2d21ac55 | 4694 | m_leadingspace(struct mbuf *m) |
1c79356b A |
4695 | { |
4696 | if (m->m_flags & M_EXT) { | |
4697 | if (MCLHASREFERENCE(m)) | |
2d21ac55 | 4698 | return (0); |
1c79356b A |
4699 | return (m->m_data - m->m_ext.ext_buf); |
4700 | } | |
4701 | if (m->m_flags & M_PKTHDR) | |
4702 | return (m->m_data - m->m_pktdat); | |
4703 | return (m->m_data - m->m_dat); | |
4704 | } | |
4705 | ||
4706 | /* | |
2d21ac55 | 4707 | * Compute the amount of space available after the end of data in an mbuf. |
1c79356b | 4708 | */ |
91447636 | 4709 | int |
2d21ac55 | 4710 | m_trailingspace(struct mbuf *m) |
1c79356b A |
4711 | { |
4712 | if (m->m_flags & M_EXT) { | |
4713 | if (MCLHASREFERENCE(m)) | |
2d21ac55 | 4714 | return (0); |
1c79356b | 4715 | return (m->m_ext.ext_buf + m->m_ext.ext_size - |
2d21ac55 | 4716 | (m->m_data + m->m_len)); |
1c79356b A |
4717 | } |
4718 | return (&m->m_dat[MLEN] - (m->m_data + m->m_len)); | |
4719 | } | |
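/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * using m_leadingspace() to grow the front of an mbuf in place.  Both
 * space routines return 0 when the cluster is shared (MCLHASREFERENCE),
 * so this also refuses to scribble on data another chain references.
 */
static int
example_grow_front(struct mbuf *m, int hdrlen)
{
	if (m_leadingspace(m) < hdrlen)
		return (0);		/* caller must m_prepend() instead */
	m->m_data -= hdrlen;
	m->m_len += hdrlen;
	/* caller also bumps m_pkthdr.len if this mbuf has M_PKTHDR */
	return (1);
}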
4720 | ||
4721 | /* | |
2d21ac55 A |
4722 | * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain, |
4723 | * copy junk along. Does not adjust packet header length. | |
1c79356b A |
4724 | */ |
4725 | struct mbuf * | |
2d21ac55 | 4726 | m_prepend(struct mbuf *m, int len, int how) |
1c79356b A |
4727 | { |
4728 | struct mbuf *mn; | |
4729 | ||
2d21ac55 A |
4730 | _MGET(mn, how, m->m_type); |
4731 | if (mn == NULL) { | |
1c79356b | 4732 | m_freem(m); |
2d21ac55 | 4733 | return (NULL); |
1c79356b A |
4734 | } |
4735 | if (m->m_flags & M_PKTHDR) { | |
4736 | M_COPY_PKTHDR(mn, m); | |
4737 | m->m_flags &= ~M_PKTHDR; | |
4738 | } | |
4739 | mn->m_next = m; | |
4740 | m = mn; | |
3e170ce0 A |
4741 | if (m->m_flags & M_PKTHDR) { |
4742 | VERIFY(len <= MHLEN); | |
1c79356b | 4743 | MH_ALIGN(m, len); |
3e170ce0 A |
4744 | } else { |
4745 | VERIFY(len <= MLEN); | |
4746 | M_ALIGN(m, len); | |
4747 | } | |
1c79356b A |
4748 | m->m_len = len; |
4749 | return (m); | |
4750 | } | |
4751 | ||
9bccf70c | 4752 | /* |
2d21ac55 A |
4753 | * Replacement for old M_PREPEND macro: allocate new mbuf to prepend to |
4754 | * chain, copy junk along, and adjust length. | |
9bccf70c A |
4755 | */ |
4756 | struct mbuf * | |
3e170ce0 | 4757 | m_prepend_2(struct mbuf *m, int len, int how, int align) |
2d21ac55 | 4758 | { |
3e170ce0 A |
4759 | if (M_LEADINGSPACE(m) >= len && |
4760 | (!align || IS_P2ALIGNED((m->m_data - len), sizeof(u_int32_t)))) { | |
2d21ac55 A |
4761 | m->m_data -= len; |
4762 | m->m_len += len; | |
4763 | } else { | |
9bccf70c | 4764 | m = m_prepend(m, len, how); |
2d21ac55 A |
4765 | } |
4766 | if ((m) && (m->m_flags & M_PKTHDR)) | |
4767 | m->m_pkthdr.len += len; | |
4768 | return (m); | |
9bccf70c A |
4769 | } |
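/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * prepending a 14-byte link-layer header with m_prepend_2(), asking for
 * 32-bit alignment of the new data pointer.  On allocation failure the
 * original chain has already been freed inside m_prepend(), so the old
 * pointer must not be reused.
 */
static struct mbuf *
example_prepend_ether(struct mbuf *m)
{
	m = m_prepend_2(m, 14, M_DONTWAIT, 1 /* align */);
	if (m == NULL)
		return (NULL);	/* chain gone; nothing to clean up */
	/* mtod(m, char *) now points at 14 writable header bytes */
	return (m);
}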
4770 | ||
1c79356b A |
4771 | /* |
4772 | * Make a copy of an mbuf chain starting "off0" bytes from the beginning, | |
4773 | * continuing for "len" bytes. If len is M_COPYALL, copy to the end of the chain.
4774 | * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller. | |
4775 | */ | |
4776 | int MCFail; | |
4777 | ||
4778 | struct mbuf * | |
39236c6e | 4779 | m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode) |
1c79356b | 4780 | { |
2d21ac55 | 4781 | struct mbuf *n, *mhdr = NULL, **np; |
91447636 | 4782 | int off = off0; |
1c79356b A |
4783 | struct mbuf *top; |
4784 | int copyhdr = 0; | |
4785 | ||
4786 | if (off < 0 || len < 0) | |
2d21ac55 A |
4787 | panic("m_copym: invalid offset %d or len %d", off, len); |
4788 | ||
fe8ab488 A |
4789 | VERIFY((mode != M_COPYM_MUST_COPY_HDR && |
4790 | mode != M_COPYM_MUST_MOVE_HDR) || (m->m_flags & M_PKTHDR)); | |
4791 | ||
4792 | if ((off == 0 && (m->m_flags & M_PKTHDR)) || | |
4793 | mode == M_COPYM_MUST_COPY_HDR || mode == M_COPYM_MUST_MOVE_HDR) { | |
2d21ac55 | 4794 | mhdr = m; |
1c79356b | 4795 | copyhdr = 1; |
2d21ac55 | 4796 | } |
fa4905b1 A |
4797 | |
4798 | while (off >= m->m_len) { | |
2d21ac55 A |
4799 | if (m->m_next == NULL) |
4800 | panic("m_copym: invalid mbuf chain"); | |
1c79356b A |
4801 | off -= m->m_len; |
4802 | m = m->m_next; | |
4803 | } | |
4804 | np = ⊤ | |
2d21ac55 | 4805 | top = NULL; |
fa4905b1 | 4806 | |
1c79356b | 4807 | while (len > 0) { |
2d21ac55 | 4808 | if (m == NULL) { |
1c79356b | 4809 | if (len != M_COPYALL) |
2d21ac55 | 4810 | panic("m_copym: len != M_COPYALL"); |
1c79356b A |
4811 | break; |
4812 | } | |
2d21ac55 | 4813 | |
fe8ab488 A |
4814 | if (copyhdr) |
4815 | n = _M_RETRYHDR(wait, m->m_type); | |
4816 | else | |
4817 | n = _M_RETRY(wait, m->m_type); | |
1c79356b | 4818 | *np = n; |
fa4905b1 | 4819 | |
2d21ac55 | 4820 | if (n == NULL) |
1c79356b | 4821 | goto nospace; |
2d21ac55 A |
4822 | |
4823 | if (copyhdr != 0) { | |
fe8ab488 A |
4824 | if ((mode == M_COPYM_MOVE_HDR) || |
4825 | (mode == M_COPYM_MUST_MOVE_HDR)) { | |
39236c6e | 4826 | M_COPY_PKTHDR(n, mhdr); |
fe8ab488 A |
4827 | } else if ((mode == M_COPYM_COPY_HDR) || |
4828 | (mode == M_COPYM_MUST_COPY_HDR)) { | |
39236c6e A |
4829 | if (m_dup_pkthdr(n, mhdr, wait) == 0) |
4830 | goto nospace; | |
4831 | } | |
1c79356b A |
4832 | if (len == M_COPYALL) |
4833 | n->m_pkthdr.len -= off0; | |
4834 | else | |
4835 | n->m_pkthdr.len = len; | |
4836 | copyhdr = 0; | |
fe8ab488 A |
4837 | /* |
4838 | * There is data to copy from the packet header mbuf | |
4839 | * if it is empty or it is before the starting offset | |
4840 | */ | |
4841 | if (mhdr != m) { | |
4842 | np = &n->m_next; | |
4843 | continue; | |
2d21ac55 | 4844 | } |
1c79356b | 4845 | } |
2d21ac55 | 4846 | n->m_len = MIN(len, (m->m_len - off)); |
1c79356b | 4847 | if (m->m_flags & M_EXT) { |
1c79356b | 4848 | n->m_ext = m->m_ext; |
2d21ac55 | 4849 | m_incref(m); |
1c79356b A |
4850 | n->m_data = m->m_data + off; |
4851 | n->m_flags |= M_EXT; | |
fa4905b1 | 4852 | } else { |
fe8ab488 A |
4853 | /* |
4854 | * Limit to the capacity of the destination | |
4855 | */ | |
4856 | if (n->m_flags & M_PKTHDR) | |
4857 | n->m_len = MIN(n->m_len, MHLEN); | |
4858 | else | |
4859 | n->m_len = MIN(n->m_len, MLEN); | |
4860 | ||
4861 | if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) | |
39037602 | 4862 | panic("%s n %p copy overflow", |
fe8ab488 A |
4863 | __func__, n); |
4864 | ||
2d21ac55 | 4865 | bcopy(MTOD(m, caddr_t)+off, MTOD(n, caddr_t), |
1c79356b | 4866 | (unsigned)n->m_len); |
fa4905b1 | 4867 | } |
1c79356b A |
4868 | if (len != M_COPYALL) |
4869 | len -= n->m_len; | |
4870 | off = 0; | |
4871 | m = m->m_next; | |
4872 | np = &n->m_next; | |
4873 | } | |
fa4905b1 | 4874 | |
2d21ac55 | 4875 | if (top == NULL) |
1c79356b | 4876 | MCFail++; |
fa4905b1 | 4877 | |
1c79356b A |
4878 | return (top); |
4879 | nospace: | |
fa4905b1 | 4880 | |
1c79356b A |
4881 | m_freem(top); |
4882 | MCFail++; | |
2d21ac55 | 4883 | return (NULL); |
1c79356b A |
4884 | } |
4885 | ||
39236c6e A |
4886 | |
4887 | struct mbuf * | |
4888 | m_copym(struct mbuf *m, int off0, int len, int wait) | |
4889 | { | |
4890 | return (m_copym_mode(m, off0, len, wait, M_COPYM_MOVE_HDR)); | |
4891 | } | |
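/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * taking a cheap reference-counted copy of a whole packet.  Cluster
 * data is shared via m_incref() rather than copied, so the original's
 * clusters read as referenced (and thus read-only to writers that
 * check MCLHASREFERENCE) until the copy is freed.
 */
static struct mbuf *
example_snapshot(struct mbuf *m)
{
	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}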
4892 | ||
9bccf70c | 4893 | /* |
2d21ac55 A |
4894 | * Equivalent to m_copym except that all necessary mbuf hdrs are allocated
4895 | * within this routine. Also, the last mbuf and offset accessed are passed
4896 | * out and can be passed back in to avoid having to rescan the entire mbuf
4897 | * list (normally hung off of the socket).
9bccf70c | 4898 | */ |
fa4905b1 | 4899 | struct mbuf * |
fe8ab488 | 4900 | m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait, |
39236c6e | 4901 | struct mbuf **m_lastm, int *m_off, uint32_t mode) |
2d21ac55 | 4902 | { |
fe8ab488 | 4903 | struct mbuf *m = m0, *n, **np = NULL; |
2d21ac55 A |
4904 | int off = off0, len = len0; |
4905 | struct mbuf *top = NULL; | |
4906 | int mcflags = MSLEEPF(wait); | |
fa4905b1 | 4907 | int copyhdr = 0; |
2d21ac55 A |
4908 | int type = 0; |
4909 | mcache_obj_t *list = NULL; | |
4910 | int needed = 0; | |
fa4905b1 | 4911 | |
2d21ac55 | 4912 | if (off == 0 && (m->m_flags & M_PKTHDR)) |
fa4905b1 | 4913 | copyhdr = 1; |
39037602 | 4914 | |
fe8ab488 | 4915 | if (m_lastm != NULL && *m_lastm != NULL) { |
6d2010ae | 4916 | m = *m_lastm; |
fa4905b1 A |
4917 | off = *m_off; |
4918 | } else { | |
2d21ac55 A |
4919 | while (off >= m->m_len) { |
4920 | off -= m->m_len; | |
fa4905b1 A |
4921 | m = m->m_next; |
4922 | } | |
4923 | } | |
91447636 | 4924 | |
2d21ac55 A |
4925 | n = m; |
4926 | while (len > 0) { | |
4927 | needed++; | |
4928 | ASSERT(n != NULL); | |
4929 | len -= MIN(len, (n->m_len - ((needed == 1) ? off : 0))); | |
4930 | n = n->m_next; | |
4931 | } | |
4932 | needed++; | |
4933 | len = len0; | |
4934 | ||
4935 | /* | |
4936 | * If the caller doesn't want to be put to sleep, mark it with | |
4937 | * MCR_TRYHARD so that we may reclaim buffers from other places | |
4938 | * before giving up. | |
4939 | */ | |
4940 | if (mcflags & MCR_NOSLEEP) | |
4941 | mcflags |= MCR_TRYHARD; | |
4942 | ||
4943 | if (mcache_alloc_ext(m_cache(MC_MBUF), &list, needed, | |
4944 | mcflags) != needed) | |
4945 | goto nospace; | |
fa4905b1 | 4946 | |
2d21ac55 | 4947 | needed = 0; |
fa4905b1 | 4948 | while (len > 0) { |
2d21ac55 A |
4949 | n = (struct mbuf *)list; |
4950 | list = list->obj_next; | |
4951 | ASSERT(n != NULL && m != NULL); | |
4952 | ||
4953 | type = (top == NULL) ? MT_HEADER : m->m_type; | |
4954 | MBUF_INIT(n, (top == NULL), type); | |
4955 | #if CONFIG_MACF_NET | |
4956 | if (top == NULL && mac_mbuf_label_init(n, wait) != 0) { | |
4957 | mtype_stat_inc(MT_HEADER); | |
4958 | mtype_stat_dec(MT_FREE); | |
4959 | m_free(n); | |
fa4905b1 | 4960 | goto nospace; |
2d21ac55 A |
4961 | } |
4962 | #endif /* MAC_NET */ | |
4963 | ||
4964 | if (top == NULL) { | |
4965 | top = n; | |
fa4905b1 A |
4966 | np = &top->m_next; |
4967 | continue; | |
2d21ac55 A |
4968 | } else { |
4969 | needed++; | |
4970 | *np = n; | |
4971 | } | |
fa4905b1 A |
4972 | |
4973 | if (copyhdr) { | |
fe8ab488 A |
4974 | if ((mode == M_COPYM_MOVE_HDR) || |
4975 | (mode == M_COPYM_MUST_MOVE_HDR)) { | |
39236c6e | 4976 | M_COPY_PKTHDR(n, m); |
fe8ab488 A |
4977 | } else if ((mode == M_COPYM_COPY_HDR) || |
4978 | (mode == M_COPYM_MUST_COPY_HDR)) { | |
39236c6e A |
4979 | if (m_dup_pkthdr(n, m, wait) == 0) |
4980 | goto nospace; | |
4981 | } | |
fa4905b1 A |
4982 | n->m_pkthdr.len = len; |
4983 | copyhdr = 0; | |
4984 | } | |
2d21ac55 | 4985 | n->m_len = MIN(len, (m->m_len - off)); |
fa4905b1 A |
4986 | |
4987 | if (m->m_flags & M_EXT) { | |
4988 | n->m_ext = m->m_ext; | |
2d21ac55 | 4989 | m_incref(m); |
fa4905b1 A |
4990 | n->m_data = m->m_data + off; |
4991 | n->m_flags |= M_EXT; | |
4992 | } else { | |
fe8ab488 | 4993 | if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) |
39037602 | 4994 | panic("%s n %p copy overflow", |
fe8ab488 A |
4995 | __func__, n); |
4996 | ||
2d21ac55 | 4997 | bcopy(MTOD(m, caddr_t)+off, MTOD(n, caddr_t), |
fa4905b1 A |
4998 | (unsigned)n->m_len); |
4999 | } | |
5000 | len -= n->m_len; | |
2d21ac55 | 5001 | |
fa4905b1 | 5002 | if (len == 0) { |
fe8ab488 A |
5003 | if (m_lastm != NULL && m_off != NULL) { |
5004 | if ((off + n->m_len) == m->m_len) { | |
5005 | *m_lastm = m->m_next; | |
5006 | *m_off = 0; | |
5007 | } else { | |
5008 | *m_lastm = m; | |
5009 | *m_off = off + n->m_len; | |
5010 | } | |
fa4905b1 | 5011 | } |
2d21ac55 | 5012 | break; |
fa4905b1 A |
5013 | } |
5014 | off = 0; | |
5015 | m = m->m_next; | |
5016 | np = &n->m_next; | |
5017 | } | |
fa4905b1 | 5018 | |
2d21ac55 A |
5019 | mtype_stat_inc(MT_HEADER); |
5020 | mtype_stat_add(type, needed); | |
5021 | mtype_stat_sub(MT_FREE, needed + 1); | |
5022 | ||
5023 | ASSERT(list == NULL); | |
fa4905b1 | 5024 | return (top); |
fa4905b1 | 5025 | |
2d21ac55 A |
5026 | nospace: |
5027 | if (list != NULL) | |
5028 | mcache_free_ext(m_cache(MC_MBUF), list); | |
5029 | if (top != NULL) | |
5030 | m_freem(top); | |
fa4905b1 | 5031 | MCFail++; |
2d21ac55 | 5032 | return (NULL); |
fa4905b1 A |
5033 | } |
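/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * resumable pattern m_copym_with_hdrs() exists for -- slicing a send
 * buffer into MSS-sized copies without rescanning the chain from the
 * start each time; "last"/"off" carry the cursor between calls.  The
 * explicit m_pkthdr.len store is needed because header fields are only
 * filled in by the routine when the copy starts at offset 0.
 */
static void
example_slice_sendbuf(struct mbuf *sb_mb, int total)
{
	struct mbuf *last = NULL;
	int off = 0, done = 0;
	const int slice = 1460;		/* assumed MSS */

	while (done < total) {
		struct mbuf *n;
		int len = MIN(slice, total - done);

		n = m_copym_with_hdrs(sb_mb, done, len, M_DONTWAIT,
		    &last, &off, M_COPYM_COPY_HDR);
		if (n == NULL)
			break;		/* out of mbufs; retry later */
		n->m_pkthdr.len = len;
		/* transmit n here; ownership passes to the driver */
		done += len;
	}
}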
5034 | ||
1c79356b A |
5035 | /* |
5036 | * Copy data from an mbuf chain starting "off" bytes from the beginning, | |
5037 | * continuing for "len" bytes, into the indicated buffer. | |
5038 | */ | |
2d21ac55 | 5039 | void |
b0d623f7 | 5040 | m_copydata(struct mbuf *m, int off, int len, void *vp) |
1c79356b | 5041 | { |
91447636 | 5042 | unsigned count; |
b0d623f7 | 5043 | char *cp = vp; |
1c79356b A |
5044 | |
5045 | if (off < 0 || len < 0) | |
2d21ac55 A |
5046 | panic("m_copydata: invalid offset %d or len %d", off, len); |
5047 | ||
1c79356b | 5048 | while (off > 0) { |
2d21ac55 A |
5049 | if (m == NULL) |
5050 | panic("m_copydata: invalid mbuf chain"); | |
1c79356b A |
5051 | if (off < m->m_len) |
5052 | break; | |
5053 | off -= m->m_len; | |
5054 | m = m->m_next; | |
5055 | } | |
5056 | while (len > 0) { | |
2d21ac55 A |
5057 | if (m == NULL) |
5058 | panic("m_copydata: invalid mbuf chain"); | |
5059 | count = MIN(m->m_len - off, len); | |
5060 | bcopy(MTOD(m, caddr_t) + off, cp, count); | |
1c79356b A |
5061 | len -= count; |
5062 | cp += count; | |
5063 | off = 0; | |
5064 | m = m->m_next; | |
5065 | } | |
5066 | } | |
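/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * extracting a fixed-size header into an aligned local copy with
 * m_copydata().  This works no matter how the bytes are scattered
 * across the chain, which makes it the usual alternative to m_pullup()
 * when no pointer into the chain itself is needed.
 */
static void
example_peek_hdr(struct mbuf *m)
{
	struct example_hdr {		/* assumed 8-byte wire format */
		u_int32_t eh_src;
		u_int32_t eh_dst;
	} hdr;

	if ((m->m_flags & M_PKTHDR) == 0 ||
	    m->m_pkthdr.len < (int)sizeof (hdr))
		return;			/* runt packet; nothing to read */
	m_copydata(m, 0, sizeof (hdr), &hdr);
	/* hdr is now a private, aligned copy of the first 8 bytes */
}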
5067 | ||
5068 | /* | |
2d21ac55 A |
5069 | * Concatenate mbuf chain n to m. Both chains must be of the same type |
5070 | * (e.g. MT_DATA). Any m_pkthdr is not updated. | |
1c79356b | 5071 | */ |
2d21ac55 A |
5072 | void |
5073 | m_cat(struct mbuf *m, struct mbuf *n) | |
1c79356b A |
5074 | { |
5075 | while (m->m_next) | |
5076 | m = m->m_next; | |
5077 | while (n) { | |
2d21ac55 | 5078 | if ((m->m_flags & M_EXT) || |
1c79356b A |
5079 | m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) { |
5080 | /* just join the two chains */ | |
5081 | m->m_next = n; | |
5082 | return; | |
5083 | } | |
5084 | /* splat the data from one into the other */ | |
2d21ac55 | 5085 | bcopy(MTOD(n, caddr_t), MTOD(m, caddr_t) + m->m_len, |
1c79356b A |
5086 | (u_int)n->m_len); |
5087 | m->m_len += n->m_len; | |
5088 | n = m_free(n); | |
5089 | } | |
5090 | } | |
5091 | ||
5092 | void | |
2d21ac55 | 5093 | m_adj(struct mbuf *mp, int req_len) |
1c79356b | 5094 | { |
91447636 A |
5095 | int len = req_len; |
5096 | struct mbuf *m; | |
5097 | int count; | |
1c79356b A |
5098 | |
5099 | if ((m = mp) == NULL) | |
5100 | return; | |
5101 | if (len >= 0) { | |
5102 | /* | |
5103 | * Trim from head. | |
5104 | */ | |
5105 | while (m != NULL && len > 0) { | |
5106 | if (m->m_len <= len) { | |
5107 | len -= m->m_len; | |
5108 | m->m_len = 0; | |
5109 | m = m->m_next; | |
5110 | } else { | |
5111 | m->m_len -= len; | |
5112 | m->m_data += len; | |
5113 | len = 0; | |
5114 | } | |
5115 | } | |
5116 | m = mp; | |
5117 | if (m->m_flags & M_PKTHDR) | |
5118 | m->m_pkthdr.len -= (req_len - len); | |
5119 | } else { | |
5120 | /* | |
5121 | * Trim from tail. Scan the mbuf chain, | |
5122 | * calculating its length and finding the last mbuf. | |
5123 | * If the adjustment only affects this mbuf, then just | |
5124 | * adjust and return. Otherwise, rescan and truncate | |
5125 | * after the remaining size. | |
5126 | */ | |
5127 | len = -len; | |
5128 | count = 0; | |
5129 | for (;;) { | |
5130 | count += m->m_len; | |
5131 | if (m->m_next == (struct mbuf *)0) | |
5132 | break; | |
5133 | m = m->m_next; | |
5134 | } | |
5135 | if (m->m_len >= len) { | |
5136 | m->m_len -= len; | |
5137 | m = mp; | |
5138 | if (m->m_flags & M_PKTHDR) | |
5139 | m->m_pkthdr.len -= len; | |
5140 | return; | |
5141 | } | |
5142 | count -= len; | |
5143 | if (count < 0) | |
5144 | count = 0; | |
5145 | /* | |
5146 | * Correct length for chain is "count". | |
5147 | * Find the mbuf with last data, adjust its length, | |
5148 | * and toss data from remaining mbufs on chain. | |
5149 | */ | |
5150 | m = mp; | |
5151 | if (m->m_flags & M_PKTHDR) | |
5152 | m->m_pkthdr.len = count; | |
5153 | for (; m; m = m->m_next) { | |
5154 | if (m->m_len >= count) { | |
5155 | m->m_len = count; | |
5156 | break; | |
5157 | } | |
5158 | count -= m->m_len; | |
5159 | } | |
91447636 | 5160 | while ((m = m->m_next)) |
1c79356b A |
5161 | m->m_len = 0; |
5162 | } | |
5163 | } | |
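/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * two directions of m_adj().  A positive count trims from the head
 * (e.g. stripping an encapsulation header); a negative count trims from
 * the tail (e.g. dropping a trailing checksum).  m_pkthdr.len is kept
 * in sync in both cases.
 */
static void
example_strip_framing(struct mbuf *m, int hdrlen, int trailerlen)
{
	m_adj(m, hdrlen);	/* drop hdrlen bytes from the front */
	m_adj(m, -trailerlen);	/* drop trailerlen bytes from the back */
}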
5164 | ||
5165 | /* | |
5166 | * Rearrange an mbuf chain so that len bytes are contiguous
5167 | * and in the data area of an mbuf (so that mtod and dtom
5168 | * will work for a structure of size len). Returns the resulting
5169 | * mbuf chain on success, frees it and returns NULL on failure.
5170 | * If there is room, it will add up to max_protohdr-len extra bytes to the | |
5171 | * contiguous region in an attempt to avoid being called next time. | |
5172 | */ | |
5173 | int MPFail; | |
5174 | ||
5175 | struct mbuf * | |
2d21ac55 | 5176 | m_pullup(struct mbuf *n, int len) |
1c79356b | 5177 | { |
91447636 A |
5178 | struct mbuf *m; |
5179 | int count; | |
1c79356b A |
5180 | int space; |
5181 | ||
5182 | /* | |
5183 | * If first mbuf has no cluster, and has room for len bytes | |
5184 | * without shifting current data, pullup into it, | |
5185 | * otherwise allocate a new mbuf to prepend to the chain. | |
5186 | */ | |
5187 | if ((n->m_flags & M_EXT) == 0 && | |
5188 | n->m_data + len < &n->m_dat[MLEN] && n->m_next) { | |
5189 | if (n->m_len >= len) | |
5190 | return (n); | |
5191 | m = n; | |
5192 | n = n->m_next; | |
5193 | len -= m->m_len; | |
5194 | } else { | |
5195 | if (len > MHLEN) | |
5196 | goto bad; | |
2d21ac55 | 5197 | _MGET(m, M_DONTWAIT, n->m_type); |
1c79356b A |
5198 | if (m == 0) |
5199 | goto bad; | |
5200 | m->m_len = 0; | |
5201 | if (n->m_flags & M_PKTHDR) { | |
5202 | M_COPY_PKTHDR(m, n); | |
5203 | n->m_flags &= ~M_PKTHDR; | |
5204 | } | |
5205 | } | |
5206 | space = &m->m_dat[MLEN] - (m->m_data + m->m_len); | |
5207 | do { | |
2d21ac55 A |
5208 | count = MIN(MIN(MAX(len, max_protohdr), space), n->m_len); |
5209 | bcopy(MTOD(n, caddr_t), MTOD(m, caddr_t) + m->m_len, | |
5210 | (unsigned)count); | |
1c79356b A |
5211 | len -= count; |
5212 | m->m_len += count; | |
5213 | n->m_len -= count; | |
5214 | space -= count; | |
5215 | if (n->m_len) | |
5216 | n->m_data += count; | |
5217 | else | |
5218 | n = m_free(n); | |
5219 | } while (len > 0 && n); | |
5220 | if (len > 0) { | |
5221 | (void) m_free(m); | |
5222 | goto bad; | |
5223 | } | |
5224 | m->m_next = n; | |
5225 | return (m); | |
5226 | bad: | |
5227 | m_freem(n); | |
5228 | MPFail++; | |
5229 | return (0); | |
5230 | } | |
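/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * canonical m_pullup() idiom before casting the data pointer to a
 * header structure.  Note the failure contract: the chain has already
 * been freed, so the caller's pointer must be replaced, never reused,
 * and hdrlen must not exceed MHLEN or the call fails outright.
 */
static struct mbuf *
example_pullup_hdr(struct mbuf *m, int hdrlen)
{
	if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == NULL)
		return (NULL);	/* chain freed; MPFail was bumped */
	/* mtod(m, ...) may now be cast to a struct of size hdrlen */
	return (m);
}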
5231 | ||
6d2010ae A |
5232 | /* |
5233 | * Like m_pullup(), except a new mbuf is always allocated, and we allow | |
5234 | * the amount of empty space before the data in the new mbuf to be specified | |
5235 | * (in the event that the caller expects to prepend later). | |
5236 | */ | |
5237 | __private_extern__ int MSFail = 0; | |
5238 | ||
5239 | __private_extern__ struct mbuf * | |
5240 | m_copyup(struct mbuf *n, int len, int dstoff) | |
5241 | { | |
5242 | struct mbuf *m; | |
5243 | int count, space; | |
5244 | ||
5245 | if (len > (MHLEN - dstoff)) | |
5246 | goto bad; | |
5247 | MGET(m, M_DONTWAIT, n->m_type); | |
5248 | if (m == NULL) | |
5249 | goto bad; | |
5250 | m->m_len = 0; | |
5251 | if (n->m_flags & M_PKTHDR) { | |
5252 | m_copy_pkthdr(m, n); | |
5253 | n->m_flags &= ~M_PKTHDR; | |
5254 | } | |
5255 | m->m_data += dstoff; | |
5256 | space = &m->m_dat[MLEN] - (m->m_data + m->m_len); | |
5257 | do { | |
5258 | count = min(min(max(len, max_protohdr), space), n->m_len); | |
5259 | memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), | |
5260 | (unsigned)count); | |
5261 | len -= count; | |
5262 | m->m_len += count; | |
5263 | n->m_len -= count; | |
5264 | space -= count; | |
5265 | if (n->m_len) | |
5266 | n->m_data += count; | |
5267 | else | |
5268 | n = m_free(n); | |
5269 | } while (len > 0 && n); | |
5270 | if (len > 0) { | |
5271 | (void) m_free(m); | |
5272 | goto bad; | |
5273 | } | |
5274 | m->m_next = n; | |
5275 | return (m); | |
5276 | bad: | |
5277 | m_freem(n); | |
5278 | MSFail++; | |
5279 | return (NULL); | |
5280 | } | |
5281 | ||
1c79356b A |
5282 | /* |
5283 | * Partition an mbuf chain in two pieces, returning the tail -- | |
5284 | * all but the first len0 bytes. In case of failure, it returns NULL and | |
5285 | * attempts to restore the chain to its original state. | |
5286 | */ | |
5287 | struct mbuf * | |
2d21ac55 | 5288 | m_split(struct mbuf *m0, int len0, int wait) |
b0d623f7 A |
5289 | { |
5290 | return (m_split0(m0, len0, wait, 1)); | |
5291 | } | |
5292 | ||
5293 | static struct mbuf * | |
5294 | m_split0(struct mbuf *m0, int len0, int wait, int copyhdr) | |
1c79356b | 5295 | { |
91447636 | 5296 | struct mbuf *m, *n; |
1c79356b A |
5297 | unsigned len = len0, remain; |
5298 | ||
5299 | for (m = m0; m && len > m->m_len; m = m->m_next) | |
5300 | len -= m->m_len; | |
2d21ac55 A |
5301 | if (m == NULL) |
5302 | return (NULL); | |
1c79356b | 5303 | remain = m->m_len - len; |
b0d623f7 | 5304 | if (copyhdr && (m0->m_flags & M_PKTHDR)) { |
2d21ac55 A |
5305 | _MGETHDR(n, wait, m0->m_type); |
5306 | if (n == NULL) | |
5307 | return (NULL); | |
1c79356b A |
5308 | n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; |
5309 | n->m_pkthdr.len = m0->m_pkthdr.len - len0; | |
5310 | m0->m_pkthdr.len = len0; | |
5311 | if (m->m_flags & M_EXT) | |
5312 | goto extpacket; | |
5313 | if (remain > MHLEN) { | |
5314 | /* m can't be the lead packet */ | |
5315 | MH_ALIGN(n, 0); | |
5316 | n->m_next = m_split(m, len, wait); | |
2d21ac55 | 5317 | if (n->m_next == NULL) { |
1c79356b | 5318 | (void) m_free(n); |
2d21ac55 | 5319 | return (NULL); |
1c79356b A |
5320 | } else |
5321 | return (n); | |
5322 | } else | |
5323 | MH_ALIGN(n, remain); | |
5324 | } else if (remain == 0) { | |
5325 | n = m->m_next; | |
2d21ac55 | 5326 | m->m_next = NULL; |
1c79356b A |
5327 | return (n); |
5328 | } else { | |
2d21ac55 A |
5329 | _MGET(n, wait, m->m_type); |
5330 | if (n == NULL) | |
5331 | return (NULL); | |
1c79356b A |
5332 | M_ALIGN(n, remain); |
5333 | } | |
5334 | extpacket: | |
5335 | if (m->m_flags & M_EXT) { | |
5336 | n->m_flags |= M_EXT; | |
0b4e3aa0 | 5337 | n->m_ext = m->m_ext; |
2d21ac55 | 5338 | m_incref(m); |
1c79356b A |
5339 | n->m_data = m->m_data + len; |
5340 | } else { | |
2d21ac55 | 5341 | bcopy(MTOD(m, caddr_t) + len, MTOD(n, caddr_t), remain); |
1c79356b A |
5342 | } |
5343 | n->m_len = remain; | |
5344 | m->m_len = len; | |
5345 | n->m_next = m->m_next; | |
2d21ac55 | 5346 | m->m_next = NULL; |
1c79356b A |
5347 | return (n); |
5348 | } | |
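/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * splitting an oversized packet at a segment boundary.  The head keeps
 * its packet header with m_pkthdr.len reduced to the split point; the
 * returned tail gets a fresh header covering the remainder.  On failure
 * NULL comes back and the original chain is left restored and usable.
 */
static struct mbuf *
example_split_at(struct mbuf *m, int mss)
{
	if (m->m_pkthdr.len <= mss)
		return (NULL);		/* nothing to split off */
	return (m_split(m, mss, M_DONTWAIT));
}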
2d21ac55 | 5349 | |
1c79356b A |
5350 | /* |
5351 | * Routine to copy from device local memory into mbufs. | |
5352 | */ | |
5353 | struct mbuf * | |
2d21ac55 A |
5354 | m_devget(char *buf, int totlen, int off0, struct ifnet *ifp, |
5355 | void (*copy)(const void *, void *, size_t)) | |
1c79356b | 5356 | { |
91447636 | 5357 | struct mbuf *m; |
2d21ac55 | 5358 | struct mbuf *top = NULL, **mp = ⊤ |
91447636 A |
5359 | int off = off0, len; |
5360 | char *cp; | |
1c79356b A |
5361 | char *epkt; |
5362 | ||
5363 | cp = buf; | |
5364 | epkt = cp + totlen; | |
5365 | if (off) { | |
5366 | /* | |
5367 | * If 'off' is non-zero, packet is trailer-encapsulated, | |
5368 | * so we have to skip the type and length fields. | |
5369 | */ | |
2d21ac55 A |
5370 | cp += off + 2 * sizeof (u_int16_t); |
5371 | totlen -= 2 * sizeof (u_int16_t); | |
1c79356b | 5372 | } |
2d21ac55 A |
5373 | _MGETHDR(m, M_DONTWAIT, MT_DATA); |
5374 | if (m == NULL) | |
5375 | return (NULL); | |
1c79356b A |
5376 | m->m_pkthdr.rcvif = ifp; |
5377 | m->m_pkthdr.len = totlen; | |
5378 | m->m_len = MHLEN; | |
5379 | ||
5380 | while (totlen > 0) { | |
2d21ac55 A |
5381 | if (top != NULL) { |
5382 | _MGET(m, M_DONTWAIT, MT_DATA); | |
5383 | if (m == NULL) { | |
1c79356b | 5384 | m_freem(top); |
2d21ac55 | 5385 | return (NULL); |
1c79356b A |
5386 | } |
5387 | m->m_len = MLEN; | |
5388 | } | |
2d21ac55 | 5389 | len = MIN(totlen, epkt - cp); |
1c79356b A |
5390 | if (len >= MINCLSIZE) { |
5391 | MCLGET(m, M_DONTWAIT); | |
2d21ac55 A |
5392 | if (m->m_flags & M_EXT) { |
5393 | m->m_len = len = MIN(len, m_maxsize(MC_CL)); | |
5394 | } else { | |
5395 | /* give up when it's out of cluster mbufs */ | |
5396 | if (top != NULL) | |
5397 | m_freem(top); | |
1c79356b | 5398 | m_freem(m); |
2d21ac55 | 5399 | return (NULL); |
1c79356b A |
5400 | } |
5401 | } else { | |
5402 | /* | |
5403 | * Place initial small packet/header at end of mbuf. | |
5404 | */ | |
5405 | if (len < m->m_len) { | |
2d21ac55 A |
5406 | if (top == NULL && |
5407 | len + max_linkhdr <= m->m_len) | |
1c79356b A |
5408 | m->m_data += max_linkhdr; |
5409 | m->m_len = len; | |
2d21ac55 | 5410 | } else { |
1c79356b | 5411 | len = m->m_len; |
2d21ac55 | 5412 | } |
1c79356b A |
5413 | } |
5414 | if (copy) | |
2d21ac55 | 5415 | copy(cp, MTOD(m, caddr_t), (unsigned)len); |
1c79356b | 5416 | else |
2d21ac55 | 5417 | bcopy(cp, MTOD(m, caddr_t), (unsigned)len); |
1c79356b A |
5418 | cp += len; |
5419 | *mp = m; | |
5420 | mp = &m->m_next; | |
5421 | totlen -= len; | |
5422 | if (cp == epkt) | |
5423 | cp = buf; | |
5424 | } | |
5425 | return (top); | |
5426 | } | |
5427 | ||
6d2010ae A |
5428 | #ifndef MBUF_GROWTH_NORMAL_THRESH |
5429 | #define MBUF_GROWTH_NORMAL_THRESH 25 | |
5430 | #endif | |
b0d623f7 | 5431 | |
1c79356b | 5432 | /* |
2d21ac55 | 5433 | * Cluster freelist allocation check. |
1c79356b A |
5434 | */ |
5435 | static int | |
91447636 | 5436 | m_howmany(int num, size_t bufsize) |
1c79356b | 5437 | { |
2d21ac55 | 5438 | int i = 0, j = 0; |
6d2010ae A |
5439 | u_int32_t m_mbclusters, m_clusters, m_bigclusters, m_16kclusters; |
5440 | u_int32_t m_mbfree, m_clfree, m_bigclfree, m_16kclfree; | |
5441 | u_int32_t sumclusters, freeclusters; | |
5442 | u_int32_t percent_pool, percent_kmem; | |
5443 | u_int32_t mb_growth, mb_growth_thresh; | |
5444 | ||
5445 | VERIFY(bufsize == m_maxsize(MC_BIGCL) || | |
5446 | bufsize == m_maxsize(MC_16KCL)); | |
2d21ac55 A |
5447 | |
5448 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
5449 | ||
6d2010ae A |
5450 | /* Numbers in 2K cluster units */ |
5451 | m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT; | |
2d21ac55 | 5452 | m_clusters = m_total(MC_CL); |
6d2010ae | 5453 | m_bigclusters = m_total(MC_BIGCL) << NCLPBGSHIFT; |
2d21ac55 | 5454 | m_16kclusters = m_total(MC_16KCL); |
6d2010ae A |
5455 | sumclusters = m_mbclusters + m_clusters + m_bigclusters; |
5456 | ||
5457 | m_mbfree = m_infree(MC_MBUF) >> NMBPCLSHIFT; | |
2d21ac55 | 5458 | m_clfree = m_infree(MC_CL); |
6d2010ae | 5459 | m_bigclfree = m_infree(MC_BIGCL) << NCLPBGSHIFT; |
2d21ac55 | 5460 | m_16kclfree = m_infree(MC_16KCL); |
6d2010ae | 5461 | freeclusters = m_mbfree + m_clfree + m_bigclfree; |
2d21ac55 | 5462 | |
91447636 | 5463 | /* Bail if we've maxed out the mbuf memory map */ |
6d2010ae | 5464 | if ((bufsize == m_maxsize(MC_BIGCL) && sumclusters >= nclusters) || |
2d21ac55 | 5465 | (njcl > 0 && bufsize == m_maxsize(MC_16KCL) && |
6d2010ae | 5466 | (m_16kclusters << NCLPJCLSHIFT) >= njcl)) { |
2d21ac55 A |
5467 | return (0); |
5468 | } | |
5469 | ||
6d2010ae | 5470 | if (bufsize == m_maxsize(MC_BIGCL)) { |
2d21ac55 | 5471 | /* Under minimum */ |
6d2010ae A |
5472 | if (m_bigclusters < m_minlimit(MC_BIGCL)) |
5473 | return (m_minlimit(MC_BIGCL) - m_bigclusters); | |
5474 | ||
5475 | percent_pool = | |
5476 | ((sumclusters - freeclusters) * 100) / sumclusters; | |
5477 | percent_kmem = (sumclusters * 100) / nclusters; | |
5478 | ||
5479 | /* | |
5480 | * If a light/normal user, grow conservatively (75%) | |
5481 | * If a heavy user, grow aggressively (50%) | |
5482 | */ | |
5483 | if (percent_kmem < MBUF_GROWTH_NORMAL_THRESH) | |
5484 | mb_growth = MB_GROWTH_NORMAL; | |
5485 | else | |
5486 | mb_growth = MB_GROWTH_AGGRESSIVE; | |
5487 | ||
5488 | if (percent_kmem < 5) { | |
5489 | /* For initial allocations */ | |
5490 | i = num; | |
5491 | } else { | |
5492 | /* Return if >= MBIGCL_LOWAT clusters available */ | |
5493 | if (m_infree(MC_BIGCL) >= MBIGCL_LOWAT && | |
5494 | m_total(MC_BIGCL) >= | |
5495 | MBIGCL_LOWAT + m_minlimit(MC_BIGCL)) | |
2d21ac55 | 5496 | return (0); |
6d2010ae A |
5497 | |
5498 | /* Ensure at least num clusters are accessible */ | |
5499 | if (num >= m_infree(MC_BIGCL)) | |
5500 | i = num - m_infree(MC_BIGCL); | |
5501 | if (num > m_total(MC_BIGCL) - m_minlimit(MC_BIGCL)) | |
5502 | j = num - (m_total(MC_BIGCL) - | |
5503 | m_minlimit(MC_BIGCL)); | |
5504 | ||
2d21ac55 | 5505 | i = MAX(i, j); |
6d2010ae A |
5506 | |
5507 | /* | |
5508 | * Grow pool if percent_pool > 75 (normal growth) | |
5509 | * or percent_pool > 50 (aggressive growth). | |
5510 | */ | |
5511 | mb_growth_thresh = 100 - (100 / (1 << mb_growth)); | |
5512 | if (percent_pool > mb_growth_thresh) | |
5513 | j = ((sumclusters + num) >> mb_growth) - | |
5514 | freeclusters; | |
2d21ac55 | 5515 | i = MAX(i, j); |
2d21ac55 | 5516 | } |
6d2010ae A |
5517 | |
5518 | /* Check to ensure we didn't go over limits */ | |
5519 | if (i + m_bigclusters >= m_maxlimit(MC_BIGCL)) | |
5520 | i = m_maxlimit(MC_BIGCL) - m_bigclusters; | |
5521 | if ((i << 1) + sumclusters >= nclusters) | |
5522 | i = (nclusters - sumclusters) >> 1; | |
2d21ac55 | 5523 | VERIFY((m_total(MC_BIGCL) + i) <= m_maxlimit(MC_BIGCL)); |
6d2010ae A |
5524 | VERIFY(sumclusters + (i << 1) <= nclusters); |
5525 | ||
5526 | } else { /* 16K CL */ | |
2d21ac55 | 5527 | VERIFY(njcl > 0); |
6d2010ae A |
5528 | /* Ensure at least num clusters are available */ |
5529 | if (num >= m_16kclfree) | |
5530 | i = num - m_16kclfree; | |
5531 | ||
5532 | /* Always grow 16KCL pool aggressively */ | |
5533 | if (((m_16kclusters + num) >> 1) > m_16kclfree) | |
5534 | j = ((m_16kclusters + num) >> 1) - m_16kclfree; | |
5535 | i = MAX(i, j); | |
5536 | ||
5537 | /* Check to ensure we don't go over limit */ | |
5538 | if (i + m_16kclusters >= m_maxlimit(MC_16KCL)) | |
5539 | i = m_maxlimit(MC_16KCL) - m_16kclusters; | |
2d21ac55 | 5540 | VERIFY((m_total(MC_16KCL) + i) <= m_maxlimit(MC_16KCL)); |
91447636 | 5541 | } |
2d21ac55 | 5542 | return (i); |
1c79356b | 5543 | } |
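/*
 * Worked example for the growth thresholds above (constant values
 * assumed from their use in the comments): with MB_GROWTH_NORMAL == 2,
 * mb_growth_thresh = 100 - (100 / (1 << 2)) = 75, so the pool grows
 * once more than 75% of the 2K-cluster equivalents are in use; with
 * MB_GROWTH_AGGRESSIVE == 1 it is 100 - (100 / 2) = 50, so a heavy
 * consumer (percent_kmem >= MBUF_GROWTH_NORMAL_THRESH) triggers growth
 * at only 50% utilization.
 */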
b0d623f7 A |
5544 | /* |
5545 | * Return the number of bytes in the mbuf chain, m. | |
6d2010ae A |
5546 | */ |
5547 | unsigned int | |
b0d623f7 A |
5548 | m_length(struct mbuf *m) |
5549 | { | |
5550 | struct mbuf *m0; | |
5551 | unsigned int pktlen; | |
5552 | ||
5553 | if (m->m_flags & M_PKTHDR) | |
5554 | return (m->m_pkthdr.len); | |
5555 | ||
5556 | pktlen = 0; | |
5557 | for (m0 = m; m0 != NULL; m0 = m0->m_next) | |
5558 | pktlen += m0->m_len; | |
5559 | return (pktlen); | |
5560 | } | |
5561 | ||
1c79356b A |
5562 | /* |
5563 | * Copy data from a buffer back into the indicated mbuf chain, | |
5564 | * starting "off" bytes from the beginning, extending the mbuf | |
5565 | * chain if necessary. | |
5566 | */ | |
5567 | void | |
b0d623f7 | 5568 | m_copyback(struct mbuf *m0, int off, int len, const void *cp) |
1c79356b | 5569 | { |
b0d623f7 A |
5570 | #if DEBUG |
5571 | struct mbuf *origm = m0; | |
5572 | int error; | |
5573 | #endif /* DEBUG */ | |
1c79356b | 5574 | |
2d21ac55 | 5575 | if (m0 == NULL) |
1c79356b | 5576 | return; |
b0d623f7 A |
5577 | |
5578 | #if DEBUG | |
5579 | error = | |
5580 | #endif /* DEBUG */ | |
5581 | m_copyback0(&m0, off, len, cp, | |
5582 | M_COPYBACK0_COPYBACK | M_COPYBACK0_EXTEND, M_DONTWAIT); | |
5583 | ||
5584 | #if DEBUG | |
5585 | if (error != 0 || (m0 != NULL && origm != m0)) | |
5586 | panic("m_copyback"); | |
5587 | #endif /* DEBUG */ | |
5588 | } | |
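/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * overwriting bytes at an arbitrary offset with m_copyback(), relying
 * on its M_COPYBACK0_EXTEND behavior to allocate more mbufs if the
 * range runs past the current end of the chain.
 */
static void
example_patch_u32(struct mbuf *m, int off)
{
	u_int32_t val = 0x01020304;	/* assumed payload value */

	/* writes 4 bytes at 'off', growing the chain if necessary */
	m_copyback(m, off, sizeof (val), &val);
}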
5589 | ||
5590 | struct mbuf * | |
5591 | m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how) | |
5592 | { | |
5593 | int error; | |
5594 | ||
5595 | /* don't support chain expansion */ | |
5596 | VERIFY(off + len <= m_length(m0)); | |
5597 | ||
5598 | error = m_copyback0(&m0, off, len, cp, | |
5599 | M_COPYBACK0_COPYBACK | M_COPYBACK0_COW, how); | |
5600 | if (error) { | |
5601 | /* | |
5602 | * no way to recover from partial success. | |
5603 | * just free the chain. | |
5604 | */ | |
5605 | m_freem(m0); | |
5606 | return (NULL); | |
5607 | } | |
5608 | return (m0); | |
5609 | } | |
5610 | ||
5611 | /* | |
5612 | * m_makewritable: ensure the specified range is writable.
5613 | */ | |
5614 | int | |
5615 | m_makewritable(struct mbuf **mp, int off, int len, int how) | |
5616 | { | |
5617 | int error; | |
5618 | #if DEBUG | |
5619 | struct mbuf *n; | |
5620 | int origlen, reslen; | |
5621 | ||
5622 | origlen = m_length(*mp); | |
5623 | #endif /* DEBUG */ | |
5624 | ||
5625 | #if 0 /* M_COPYALL is large enough */ | |
5626 | if (len == M_COPYALL) | |
5627 | len = m_length(*mp) - off; /* XXX */ | |
5628 | #endif | |
5629 | ||
5630 | error = m_copyback0(mp, off, len, NULL, | |
5631 | M_COPYBACK0_PRESERVE | M_COPYBACK0_COW, how); | |
5632 | ||
5633 | #if DEBUG | |
5634 | reslen = 0; | |
5635 | for (n = *mp; n; n = n->m_next) | |
5636 | reslen += n->m_len; | |
5637 | if (origlen != reslen) | |
5638 | panic("m_makewritable: length changed"); | |
5639 | if (((*mp)->m_flags & M_PKTHDR) && reslen != (*mp)->m_pkthdr.len) | |
5640 | panic("m_makewritable: inconsist"); | |
5641 | #endif /* DEBUG */ | |
5642 | ||
5643 | return (error); | |
5644 | } | |
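/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * making a header range safely writable before an in-place rewrite.
 * If the bytes sit in a shared cluster, m_makewritable() copy-on-writes
 * just the affected range (M_COPYBACK0_COW) and may swap out the head
 * mbuf, which is why it takes a struct mbuf **.
 */
static int
example_make_hdr_writable(struct mbuf **mp, int hdrlen)
{
	int error;

	error = m_makewritable(mp, 0, hdrlen, M_DONTWAIT);
	if (error != 0)
		return (error);		/* typically ENOBUFS */
	/* bytes [0, hdrlen) of *mp are now private and writable */
	return (0);
}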
5645 | ||
5646 | static int | |
5647 | m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags, | |
5648 | int how) | |
5649 | { | |
5650 | int mlen; | |
5651 | struct mbuf *m, *n; | |
5652 | struct mbuf **mp; | |
5653 | int totlen = 0; | |
5654 | const char *cp = vp; | |
5655 | ||
5656 | VERIFY(mp0 != NULL); | |
5657 | VERIFY(*mp0 != NULL); | |
5658 | VERIFY((flags & M_COPYBACK0_PRESERVE) == 0 || cp == NULL); | |
5659 | VERIFY((flags & M_COPYBACK0_COPYBACK) == 0 || cp != NULL); | |
5660 | ||
5661 | /* | |
5662 | * we don't bother to update "totlen" in the case of M_COPYBACK0_COW, | |
5663 | * assuming that M_COPYBACK0_EXTEND and M_COPYBACK0_COW are exclusive. | |
5664 | */ | |
5665 | ||
5666 | VERIFY((~flags & (M_COPYBACK0_EXTEND|M_COPYBACK0_COW)) != 0); | |
5667 | ||
5668 | mp = mp0; | |
5669 | m = *mp; | |
1c79356b A |
5670 | while (off > (mlen = m->m_len)) { |
5671 | off -= mlen; | |
5672 | totlen += mlen; | |
2d21ac55 | 5673 | if (m->m_next == NULL) { |
b0d623f7 A |
5674 | int tspace; |
5675 | extend: | |
5676 | if (!(flags & M_COPYBACK0_EXTEND)) | |
1c79356b | 5677 | goto out; |
b0d623f7 A |
5678 | |
5679 | /* | |
5680 | * try to make some space at the end of "m". | |
5681 | */ | |
5682 | ||
5683 | mlen = m->m_len; | |
5684 | if (off + len >= MINCLSIZE && | |
5685 | !(m->m_flags & M_EXT) && m->m_len == 0) { | |
5686 | MCLGET(m, how); | |
5687 | } | |
5688 | tspace = M_TRAILINGSPACE(m); | |
5689 | if (tspace > 0) { | |
5690 | tspace = MIN(tspace, off + len); | |
5691 | VERIFY(tspace > 0); | |
5692 | bzero(mtod(m, char *) + m->m_len, | |
5693 | MIN(off, tspace)); | |
5694 | m->m_len += tspace; | |
5695 | off += mlen; | |
5696 | totlen -= mlen; | |
5697 | continue; | |
5698 | } | |
5699 | ||
5700 | /* | |
5701 | * need to allocate an mbuf. | |
5702 | */ | |
5703 | ||
5704 | if (off + len >= MINCLSIZE) { | |
5705 | n = m_getcl(how, m->m_type, 0); | |
5706 | } else { | |
5707 | n = _M_GET(how, m->m_type); | |
5708 | } | |
5709 | if (n == NULL) { | |
5710 | goto out; | |
5711 | } | |
5712 | n->m_len = 0; | |
5713 | n->m_len = MIN(M_TRAILINGSPACE(n), off + len); | |
5714 | bzero(mtod(n, char *), MIN(n->m_len, off)); | |
1c79356b A |
5715 | m->m_next = n; |
5716 | } | |
b0d623f7 | 5717 | mp = &m->m_next; |
1c79356b A |
5718 | m = m->m_next; |
5719 | } | |
5720 | while (len > 0) { | |
b0d623f7 A |
5721 | mlen = m->m_len - off; |
5722 | if (mlen != 0 && m_mclhasreference(m)) { | |
5723 | char *datap; | |
5724 | int eatlen; | |
5725 | ||
5726 | /* | |
5727 | * this mbuf is read-only. | |
5728 | * allocate a new writable mbuf and try again. | |
5729 | */ | |
5730 | ||
39236c6e | 5731 | #if DIAGNOSTIC |
b0d623f7 A |
5732 | if (!(flags & M_COPYBACK0_COW)) |
5733 | panic("m_copyback0: read-only"); | |
39236c6e | 5734 | #endif /* DIAGNOSTIC */ |
b0d623f7 A |
5735 | |
5736 | /* | |
5737 | * if we're going to write into the middle of | |
5738 | * a mbuf, split it first. | |
5739 | */ | |
5740 | if (off > 0 && len < mlen) { | |
5741 | n = m_split0(m, off, how, 0); | |
5742 | if (n == NULL) | |
5743 | goto enobufs; | |
5744 | m->m_next = n; | |
5745 | mp = &m->m_next; | |
5746 | m = n; | |
5747 | off = 0; | |
5748 | continue; | |
5749 | } | |
5750 | ||
5751 | /* | |
5752 | * XXX TODO coalesce into the trailingspace of | |
5753 | * the previous mbuf when possible. | |
5754 | */ | |
5755 | ||
5756 | /* | |
5757 | * allocate a new mbuf. copy packet header if needed. | |
5758 | */ | |
5759 | n = _M_GET(how, m->m_type); | |
5760 | if (n == NULL) | |
5761 | goto enobufs; | |
5762 | if (off == 0 && (m->m_flags & M_PKTHDR)) { | |
5763 | M_COPY_PKTHDR(n, m); | |
5764 | n->m_len = MHLEN; | |
5765 | } else { | |
5766 | if (len >= MINCLSIZE) | |
5767 | MCLGET(n, M_DONTWAIT); | |
5768 | n->m_len = | |
5769 | (n->m_flags & M_EXT) ? MCLBYTES : MLEN; | |
5770 | } | |
5771 | if (n->m_len > len) | |
5772 | n->m_len = len; | |
5773 | ||
5774 | /* | |
5775 | * free the region which has been overwritten. | |
5776 | * copying data from old mbufs if requested. | |
5777 | */ | |
5778 | if (flags & M_COPYBACK0_PRESERVE) | |
5779 | datap = mtod(n, char *); | |
5780 | else | |
5781 | datap = NULL; | |
5782 | eatlen = n->m_len; | |
5783 | VERIFY(off == 0 || eatlen >= mlen); | |
5784 | if (off > 0) { | |
5785 | VERIFY(len >= mlen); | |
5786 | m->m_len = off; | |
5787 | m->m_next = n; | |
5788 | if (datap) { | |
5789 | m_copydata(m, off, mlen, datap); | |
5790 | datap += mlen; | |
5791 | } | |
5792 | eatlen -= mlen; | |
5793 | mp = &m->m_next; | |
5794 | m = m->m_next; | |
5795 | } | |
5796 | while (m != NULL && m_mclhasreference(m) && | |
5797 | n->m_type == m->m_type && eatlen > 0) { | |
5798 | mlen = MIN(eatlen, m->m_len); | |
5799 | if (datap) { | |
5800 | m_copydata(m, 0, mlen, datap); | |
5801 | datap += mlen; | |
5802 | } | |
5803 | m->m_data += mlen; | |
5804 | m->m_len -= mlen; | |
5805 | eatlen -= mlen; | |
5806 | if (m->m_len == 0) | |
5807 | *mp = m = m_free(m); | |
5808 | } | |
5809 | if (eatlen > 0) | |
5810 | n->m_len -= eatlen; | |
5811 | n->m_next = m; | |
5812 | *mp = m = n; | |
5813 | continue; | |
5814 | } | |
5815 | mlen = MIN(mlen, len); | |
5816 | if (flags & M_COPYBACK0_COPYBACK) { | |
5817 | bcopy(cp, mtod(m, caddr_t) + off, (unsigned)mlen); | |
5818 | cp += mlen; | |
5819 | } | |
1c79356b A |
5820 | len -= mlen; |
5821 | mlen += off; | |
5822 | off = 0; | |
5823 | totlen += mlen; | |
5824 | if (len == 0) | |
5825 | break; | |
2d21ac55 | 5826 | if (m->m_next == NULL) { |
b0d623f7 | 5827 | goto extend; |
1c79356b | 5828 | } |
b0d623f7 | 5829 | mp = &m->m_next; |
1c79356b A |
5830 | m = m->m_next; |
5831 | } | |
2d21ac55 | 5832 | out: |
b0d623f7 A |
5833 | if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) { |
5834 | VERIFY(flags & M_COPYBACK0_EXTEND); | |
1c79356b | 5835 | m->m_pkthdr.len = totlen; |
b0d623f7 A |
5836 | } |
5837 | ||
5838 | return (0); | |
5839 | ||
5840 | enobufs: | |
5841 | return (ENOBUFS); | |
1c79356b A |
5842 | } |
5843 | ||
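/*
 * Illustrative sketch (not part of the original source): m_copyback0 is
 * the worker behind the public m_copyback()-style routines.  Assuming
 * the usual BSD m_copyback() wrapper, overwriting bytes at a chain
 * offset looks like this; M_COPYBACK0_EXTEND semantics grow the chain
 * if the write runs past its current end.
 */
static void
example_patch_csum(struct mbuf *m, int csum_off, uint16_t csum)
{
	/* Write the 2-byte checksum at csum_off into the chain */
	m_copyback(m, csum_off, sizeof (csum), &csum);
}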
39236c6e | 5844 | uint64_t |
2d21ac55 A |
5845 | mcl_to_paddr(char *addr) |
5846 | { | |
b0d623f7 | 5847 | vm_offset_t base_phys; |
1c79356b | 5848 | |
2d21ac55 | 5849 | if (!MBUF_IN_MAP(addr)) |
39236c6e A |
5850 | return (0); |
5851 | base_phys = mcl_paddr[atop_64(addr - (char *)mbutl)]; | |
1c79356b A |
5852 | |
5853 | if (base_phys == 0) | |
39236c6e A |
5854 | return (0); |
5855 | return ((uint64_t)(ptoa_64(base_phys) | ((uint64_t)addr & PAGE_MASK))); | |
1c79356b A |
5856 | } |
5857 | ||
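/*
 * Illustrative note (not part of the original source): the translation
 * above is a page-array lookup.  The page index of the address within
 * the mbuf map selects a physical page number from mcl_paddr[], and the
 * low bits of the virtual address supply the offset within the page:
 *
 *	paddr = ptoa_64(mcl_paddr[atop_64(addr - (char *)mbutl)]) |
 *	    ((uint64_t)addr & PAGE_MASK);
 *
 * A zero return means "not in the map" or "no backing page", which
 * callers must check before handing the result to hardware.
 */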
5858 | /* | |
5859 | * Dup the mbuf chain passed in. The whole thing. No cute additional cruft. | |
5860 | * And really copy the thing. That way, we don't "precompute" checksums | |
2d21ac55 A |
5861 | * for unsuspecting consumers. Assumption: m->m_nextpkt == 0. Trick: for |
5862 | * small packets, don't dup into a cluster. That way received packets | |
5863 | * don't take up too much room in the sockbuf (cf. sbspace()). | |
1c79356b A |
5864 | */ |
5865 | int MDFail; | |
5866 | ||
5867 | struct mbuf * | |
91447636 | 5868 | m_dup(struct mbuf *m, int how) |
2d21ac55 | 5869 | { |
91447636 | 5870 | struct mbuf *n, **np; |
1c79356b A |
5871 | struct mbuf *top; |
5872 | int copyhdr = 0; | |
5873 | ||
5874 | np = &top; | |
2d21ac55 | 5875 | top = NULL; |
1c79356b A |
5876 | if (m->m_flags & M_PKTHDR) |
5877 | copyhdr = 1; | |
5878 | ||
5879 | /* | |
5880 | * Quick check: if we have one mbuf and its data fits in an | |
5881 | * mbuf with packet header, just copy and go. | |
5882 | */ | |
2d21ac55 A |
5883 | if (m->m_next == NULL) { |
5884 | /* Then just move the data into an mbuf and be done... */ | |
5885 | if (copyhdr) { | |
5886 | if (m->m_pkthdr.len <= MHLEN && m->m_len <= MHLEN) { | |
5887 | if ((n = _M_GETHDR(how, m->m_type)) == NULL) | |
5888 | return (NULL); | |
1c79356b | 5889 | n->m_len = m->m_len; |
3a60a9f5 A |
5890 | m_dup_pkthdr(n, m, how); |
5891 | bcopy(m->m_data, n->m_data, m->m_len); | |
2d21ac55 | 5892 | return (n); |
1c79356b | 5893 | } |
2d21ac55 A |
5894 | } else if (m->m_len <= MLEN) { |
5895 | if ((n = _M_GET(how, m->m_type)) == NULL) | |
5896 | return (NULL); | |
1c79356b A |
5897 | bcopy(m->m_data, n->m_data, m->m_len); |
5898 | n->m_len = m->m_len; | |
2d21ac55 | 5899 | return (n); |
1c79356b A |
5900 | } |
5901 | } | |
2d21ac55 | 5902 | while (m != NULL) { |
1c79356b | 5903 | #if BLUE_DEBUG |
39037602 | 5904 | printf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len, |
2d21ac55 | 5905 | m->m_data); |
1c79356b A |
5906 | #endif |
5907 | if (copyhdr) | |
2d21ac55 | 5908 | n = _M_GETHDR(how, m->m_type); |
1c79356b | 5909 | else |
2d21ac55 A |
5910 | n = _M_GET(how, m->m_type); |
5911 | if (n == NULL) | |
1c79356b | 5912 | goto nospace; |
2d21ac55 A |
5913 | if (m->m_flags & M_EXT) { |
5914 | if (m->m_len <= m_maxsize(MC_CL)) | |
5915 | MCLGET(n, how); | |
5916 | else if (m->m_len <= m_maxsize(MC_BIGCL)) | |
5917 | n = m_mbigget(n, how); | |
5918 | else if (m->m_len <= m_maxsize(MC_16KCL) && njcl > 0) | |
5919 | n = m_m16kget(n, how); | |
5920 | if (!(n->m_flags & M_EXT)) { | |
5921 | (void) m_free(n); | |
1c79356b | 5922 | goto nospace; |
2d21ac55 | 5923 | } |
1c79356b A |
5924 | } |
5925 | *np = n; | |
2d21ac55 A |
5926 | if (copyhdr) { |
5927 | /* Don't use M_COPY_PKTHDR: preserve m_data */ | |
3a60a9f5 | 5928 | m_dup_pkthdr(n, m, how); |
1c79356b | 5929 | copyhdr = 0; |
2d21ac55 | 5930 | if (!(n->m_flags & M_EXT)) |
1c79356b A |
5931 | n->m_data = n->m_pktdat; |
5932 | } | |
5933 | n->m_len = m->m_len; | |
5934 | /* | |
5935 | * Get the dup on the same boundary as the original. | |
5936 | * Assume that the two mbufs have the same offset to the data | |
2d21ac55 | 5937 | * area (up to word boundaries). | |
1c79356b | 5938 | */ |
2d21ac55 | 5939 | bcopy(MTOD(m, caddr_t), MTOD(n, caddr_t), (unsigned)n->m_len); |
1c79356b A |
5940 | m = m->m_next; |
5941 | np = &n->m_next; | |
5942 | #if BLUE_DEBUG | |
39037602 | 5943 | printf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len, |
2d21ac55 | 5944 | n->m_data); |
1c79356b A |
5945 | #endif |
5946 | } | |
5947 | ||
2d21ac55 | 5948 | if (top == NULL) |
1c79356b A |
5949 | MDFail++; |
5950 | return (top); | |
2d21ac55 A |
5951 | |
5952 | nospace: | |
1c79356b A |
5953 | m_freem(top); |
5954 | MDFail++; | |
2d21ac55 | 5955 | return (NULL); |
1c79356b A |
5956 | } |
5957 | ||
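/*
 * Illustrative sketch (not part of the original source): a typical use
 * of m_dup() is taking a deep, writable copy of a packet that must also
 * be kept intact, e.g. before a filter mutates one of the two copies.
 */
static struct mbuf *
example_deep_copy(struct mbuf *pkt)
{
	/* Copies every byte; returns NULL when memory is short */
	return (m_dup(pkt, M_DONTWAIT));
}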
2d21ac55 A |
5958 | #define MBUF_MULTIPAGES(m) \ |
5959 | (((m)->m_flags & M_EXT) && \ | |
3e170ce0 A |
5960 | ((IS_P2ALIGNED((m)->m_data, PAGE_SIZE) \ |
5961 | && (m)->m_len > PAGE_SIZE) || \ | |
5962 | (!IS_P2ALIGNED((m)->m_data, PAGE_SIZE) && \ | |
5963 | P2ROUNDUP((m)->m_data, PAGE_SIZE) < ((uintptr_t)(m)->m_data + (m)->m_len)))) | |
2d21ac55 A |
5964 | |
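/*
 * Illustrative note (not part of the original source), assuming 4KB
 * pages: a cluster whose m_data is page-aligned satisfies the predicate
 * above only when m_len > 4096, while one whose m_data sits at, say,
 * page base + 0x800 satisfies it as soon as m_data + m_len crosses the
 * next page boundary, i.e. when m_len > 0x800.
 */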
5965 | static struct mbuf * | |
5966 | m_expand(struct mbuf *m, struct mbuf **last) | |
9bccf70c | 5967 | { |
2d21ac55 A |
5968 | struct mbuf *top = NULL; |
5969 | struct mbuf **nm = &top; | |
5970 | uintptr_t data0, data; | |
5971 | unsigned int len0, len; | |
5972 | ||
5973 | VERIFY(MBUF_MULTIPAGES(m)); | |
5974 | VERIFY(m->m_next == NULL); | |
5975 | data0 = (uintptr_t)m->m_data; | |
5976 | len0 = m->m_len; | |
5977 | *last = top; | |
5978 | ||
5979 | for (;;) { | |
5980 | struct mbuf *n; | |
5981 | ||
5982 | data = data0; | |
3e170ce0 A |
5983 | if (IS_P2ALIGNED(data, PAGE_SIZE) && len0 > PAGE_SIZE) |
5984 | len = PAGE_SIZE; | |
5985 | else if (!IS_P2ALIGNED(data, PAGE_SIZE) && | |
5986 | P2ROUNDUP(data, PAGE_SIZE) < (data + len0)) | |
5987 | len = P2ROUNDUP(data, PAGE_SIZE) - data; | |
2d21ac55 A |
5988 | else |
5989 | len = len0; | |
5990 | ||
5991 | VERIFY(len > 0); | |
5992 | VERIFY(m->m_flags & M_EXT); | |
5993 | m->m_data = (void *)data; | |
5994 | m->m_len = len; | |
5995 | ||
5996 | *nm = *last = m; | |
5997 | nm = &m->m_next; | |
5998 | m->m_next = NULL; | |
5999 | ||
6000 | data0 += len; | |
6001 | len0 -= len; | |
6002 | if (len0 == 0) | |
6003 | break; | |
6004 | ||
6005 | n = _M_RETRY(M_DONTWAIT, MT_DATA); | |
6006 | if (n == NULL) { | |
6007 | m_freem(top); | |
6008 | top = *last = NULL; | |
6009 | break; | |
6010 | } | |
6011 | ||
6012 | n->m_ext = m->m_ext; | |
6013 | m_incref(m); | |
6014 | n->m_flags |= M_EXT; | |
6015 | m = n; | |
6016 | } | |
6017 | return (top); | |
9bccf70c A |
6018 | } |
6019 | ||
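/*
 * Illustrative note (not part of the original source): m_expand() above
 * copies no payload.  Each additional mbuf aliases the same external
 * buffer (n->m_ext = m->m_ext) and takes a reference via m_incref(), so
 * the split is purely logical and the cluster goes back to its pool
 * only after every share has been freed.
 */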
2d21ac55 A |
6020 | struct mbuf * |
6021 | m_normalize(struct mbuf *m) | |
9bccf70c | 6022 | { |
2d21ac55 A |
6023 | struct mbuf *top = NULL; |
6024 | struct mbuf **nm = &top; | |
6025 | boolean_t expanded = FALSE; | |
6026 | ||
6027 | while (m != NULL) { | |
6028 | struct mbuf *n; | |
6029 | ||
6030 | n = m->m_next; | |
6031 | m->m_next = NULL; | |
6032 | ||
6033 | /* Does the data cross one or more page boundaries? */ | |
6034 | if (MBUF_MULTIPAGES(m)) { | |
6035 | struct mbuf *last; | |
6036 | if ((m = m_expand(m, &last)) == NULL) { | |
6037 | m_freem(n); | |
6038 | m_freem(top); | |
6039 | top = NULL; | |
6040 | break; | |
6041 | } | |
6042 | *nm = m; | |
6043 | nm = &last->m_next; | |
6044 | expanded = TRUE; | |
6045 | } else { | |
6046 | *nm = m; | |
6047 | nm = &m->m_next; | |
6048 | } | |
6049 | m = n; | |
6050 | } | |
6051 | if (expanded) | |
6052 | atomic_add_32(&mb_normalized, 1); | |
6053 | return (top); | |
9bccf70c A |
6054 | } |
6055 | ||
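/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * driver whose DMA engine cannot accept a single buffer crossing a page
 * boundary might normalize the chain before building its descriptors.
 */
static struct mbuf *
example_prepare_for_dma(struct mbuf *m)
{
	/* On return, no mbuf in the chain spans a page boundary */
	return (m_normalize(m));
}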
6d2010ae A |
6056 | /* |
6057 | * Append the specified data to the indicated mbuf chain. | |
6058 | * Extend the mbuf chain if the new data does not fit in | |
6059 | * existing space. | |
6060 | * | |
6061 | * Return 1 if able to complete the job; otherwise 0. | |
6062 | */ | |
6063 | int | |
6064 | m_append(struct mbuf *m0, int len, caddr_t cp) | |
6065 | { | |
6066 | struct mbuf *m, *n; | |
6067 | int remainder, space; | |
6068 | ||
6069 | for (m = m0; m->m_next != NULL; m = m->m_next) | |
6070 | ; | |
6071 | remainder = len; | |
6072 | space = M_TRAILINGSPACE(m); | |
6073 | if (space > 0) { | |
6074 | /* | |
6075 | * Copy into available space. | |
6076 | */ | |
6077 | if (space > remainder) | |
6078 | space = remainder; | |
6079 | bcopy(cp, mtod(m, caddr_t) + m->m_len, space); | |
6080 | m->m_len += space; | |
39037602 A |
6081 | cp += space; |
6082 | remainder -= space; | |
6d2010ae A |
6083 | } |
6084 | while (remainder > 0) { | |
6085 | /* | |
6086 | * Allocate a new mbuf; could check space | |
6087 | * and allocate a cluster instead. | |
6088 | */ | |
6089 | n = m_get(M_WAITOK, m->m_type); | |
6090 | if (n == NULL) | |
6091 | break; | |
6092 | n->m_len = min(MLEN, remainder); | |
6093 | bcopy(cp, mtod(n, caddr_t), n->m_len); | |
6094 | cp += n->m_len; | |
6095 | remainder -= n->m_len; | |
6096 | m->m_next = n; | |
6097 | m = n; | |
6098 | } | |
6099 | if (m0->m_flags & M_PKTHDR) | |
6100 | m0->m_pkthdr.len += len - remainder; | |
6101 | return (remainder == 0); | |
6102 | } | |
6103 | ||
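/*
 * Illustrative sketch (not part of the original source): appending a
 * trailer with m_append(); on success the packet header length has
 * already been adjusted by the routine above.
 */
static int
example_add_trailer(struct mbuf *pkt, void *trailer, int tlen)
{
	/* Returns 1 on success, 0 if an mbuf could not be allocated */
	return (m_append(pkt, tlen, (caddr_t)trailer));
}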
6104 | struct mbuf * | |
6105 | m_last(struct mbuf *m) | |
6106 | { | |
6107 | while (m->m_next != NULL) | |
6108 | m = m->m_next; | |
6109 | return (m); | |
6110 | } | |
6111 | ||
316670eb A |
6112 | unsigned int |
6113 | m_fixhdr(struct mbuf *m0) | |
6114 | { | |
6115 | u_int len; | |
6116 | ||
39236c6e A |
6117 | VERIFY(m0->m_flags & M_PKTHDR); |
6118 | ||
316670eb A |
6119 | len = m_length2(m0, NULL); |
6120 | m0->m_pkthdr.len = len; | |
6121 | return (len); | |
6122 | } | |
6123 | ||
6124 | unsigned int | |
6125 | m_length2(struct mbuf *m0, struct mbuf **last) | |
6126 | { | |
6127 | struct mbuf *m; | |
6128 | u_int len; | |
6129 | ||
6130 | len = 0; | |
6131 | for (m = m0; m != NULL; m = m->m_next) { | |
6132 | len += m->m_len; | |
6133 | if (m->m_next == NULL) | |
6134 | break; | |
6135 | } | |
6136 | if (last != NULL) | |
6137 | *last = m; | |
6138 | return (len); | |
6139 | } | |
6140 | ||
6141 | /* | |
6142 | * Defragment an mbuf chain, returning the shortest possible chain of mbufs | |
6143 | * and clusters. If allocation fails and this cannot be completed, NULL will | |
6144 | * be returned, but the passed in chain will be unchanged. Upon success, | |
6145 | * the original chain will be freed, and the new chain will be returned. | |
6146 | * | |
6147 | * If a non-packet-header mbuf is passed in, the original mbuf chain will | |
6148 | * be returned unharmed. | |
6149 | * | |
6150 | * If an offset is specified, the first mbuf in the chain will have a leading | |
6151 | * space of the amount stated by the "off" parameter. | |
6152 | * | |
6153 | * This routine requires that the m_pkthdr.header field of the original | |
6154 | * mbuf chain is cleared by the caller. | |
6155 | */ | |
6156 | struct mbuf * | |
6157 | m_defrag_offset(struct mbuf *m0, u_int32_t off, int how) | |
6158 | { | |
6159 | struct mbuf *m_new = NULL, *m_final = NULL; | |
6160 | int progress = 0, length, pktlen; | |
6161 | ||
6162 | if (!(m0->m_flags & M_PKTHDR)) | |
6163 | return (m0); | |
6164 | ||
6165 | VERIFY(off < MHLEN); | |
6166 | m_fixhdr(m0); /* Needed sanity check */ | |
6167 | ||
6168 | pktlen = m0->m_pkthdr.len + off; | |
6169 | if (pktlen > MHLEN) | |
6170 | m_final = m_getcl(how, MT_DATA, M_PKTHDR); | |
6171 | else | |
6172 | m_final = m_gethdr(how, MT_DATA); | |
6173 | ||
6174 | if (m_final == NULL) | |
6175 | goto nospace; | |
6176 | ||
6177 | if (off > 0) { | |
6178 | pktlen -= off; | |
316670eb A |
6179 | m_final->m_data += off; |
6180 | } | |
6181 | ||
6182 | /* | |
6183 | * Caller must have handled the contents pointed to by this | |
6184 | * pointer before coming here, as otherwise it will point to | |
6185 | * the original mbuf which will get freed upon success. | |
6186 | */ | |
39236c6e | 6187 | VERIFY(m0->m_pkthdr.pkt_hdr == NULL); |
316670eb A |
6188 | |
6189 | if (m_dup_pkthdr(m_final, m0, how) == 0) | |
6190 | goto nospace; | |
6191 | ||
6192 | m_new = m_final; | |
6193 | ||
6194 | while (progress < pktlen) { | |
6195 | length = pktlen - progress; | |
6196 | if (length > MCLBYTES) | |
6197 | length = MCLBYTES; | |
39236c6e | 6198 | length -= ((m_new == m_final) ? off : 0); |
316670eb A |
6199 | |
6200 | if (m_new == NULL) { | |
6201 | if (length > MLEN) | |
6202 | m_new = m_getcl(how, MT_DATA, 0); | |
6203 | else | |
6204 | m_new = m_get(how, MT_DATA); | |
6205 | if (m_new == NULL) | |
6206 | goto nospace; | |
6207 | } | |
6208 | ||
6209 | m_copydata(m0, progress, length, mtod(m_new, caddr_t)); | |
6210 | progress += length; | |
6211 | m_new->m_len = length; | |
6212 | if (m_new != m_final) | |
6213 | m_cat(m_final, m_new); | |
6214 | m_new = NULL; | |
6215 | } | |
6216 | m_freem(m0); | |
6217 | m0 = m_final; | |
6218 | return (m0); | |
6219 | nospace: | |
6220 | if (m_final) | |
6221 | m_freem(m_final); | |
6222 | return (NULL); | |
6223 | } | |
6224 | ||
6225 | struct mbuf * | |
6226 | m_defrag(struct mbuf *m0, int how) | |
6227 | { | |
6228 | return (m_defrag_offset(m0, 0, how)); | |
6229 | } | |
6230 | ||
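/*
 * Illustrative sketch (not part of the original source): drivers with a
 * bounded number of DMA segments commonly fall back to m_defrag() when
 * a chain is too long.  Note the ownership rule documented above: on
 * success the old chain is freed, on failure it is left untouched.
 */
static struct mbuf *
example_limit_segments(struct mbuf *pkt, int max_segs)
{
	struct mbuf *m, *d;
	int segs = 0;

	for (m = pkt; m != NULL; m = m->m_next)
		segs++;
	if (segs <= max_segs)
		return (pkt);

	d = m_defrag(pkt, M_DONTWAIT);
	/* If defrag failed, hand the original chain back to the caller */
	return (d != NULL ? d : pkt);
}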
9bccf70c A |
6231 | void |
6232 | m_mchtype(struct mbuf *m, int t) | |
6233 | { | |
2d21ac55 A |
6234 | mtype_stat_inc(t); |
6235 | mtype_stat_dec(m->m_type); | |
6236 | (m)->m_type = t; | |
9bccf70c A |
6237 | } |
6238 | ||
2d21ac55 A |
6239 | void * |
6240 | m_mtod(struct mbuf *m) | |
9bccf70c | 6241 | { |
2d21ac55 | 6242 | return (MTOD(m, void *)); |
9bccf70c A |
6243 | } |
6244 | ||
2d21ac55 A |
6245 | struct mbuf * |
6246 | m_dtom(void *x) | |
9bccf70c | 6247 | { |
b0d623f7 | 6248 | return ((struct mbuf *)((uintptr_t)(x) & ~(MSIZE-1))); |
9bccf70c A |
6249 | } |
6250 | ||
2d21ac55 A |
6251 | void |
6252 | m_mcheck(struct mbuf *m) | |
9bccf70c | 6253 | { |
2d21ac55 | 6254 | _MCHECK(m); |
9bccf70c A |
6255 | } |
6256 | ||
6d2010ae A |
6257 | /* |
6258 | * Return a pointer to the mbuf, and the offset within it, of a location in the mbuf chain. | |
6259 | */ | |
6260 | struct mbuf * | |
6261 | m_getptr(struct mbuf *m, int loc, int *off) | |
6262 | { | |
6263 | ||
6264 | while (loc >= 0) { | |
6265 | /* Normal end of search. */ | |
6266 | if (m->m_len > loc) { | |
6267 | *off = loc; | |
6268 | return (m); | |
6269 | } else { | |
6270 | loc -= m->m_len; | |
6271 | if (m->m_next == NULL) { | |
6272 | if (loc == 0) { | |
6273 | /* Point at the end of valid data. */ | |
6274 | *off = m->m_len; | |
6275 | return (m); | |
6276 | } | |
6277 | return (NULL); | |
6278 | } | |
6279 | m = m->m_next; | |
6280 | } | |
6281 | } | |
6282 | return (NULL); | |
6283 | } | |
6284 | ||
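/*
 * Illustrative sketch (not part of the original source): m_getptr()
 * turns a chain-relative byte offset into an (mbuf, offset) pair, e.g.
 * to read a field in place instead of copying it out.
 */
static uint8_t *
example_field_ptr(struct mbuf *pkt, int loc)
{
	struct mbuf *m;
	int off;

	if ((m = m_getptr(pkt, loc, &off)) == NULL)
		return (NULL);		/* offset past end of chain */
	/* Valid only if the field does not straddle two mbufs */
	return ((uint8_t *)(mtod(m, caddr_t) + off));
}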
2d21ac55 A |
6285 | /* |
6286 | * Inform the corresponding mcache(s) that there's a waiter below. | |
6287 | */ | |
6288 | static void | |
6289 | mbuf_waiter_inc(mbuf_class_t class, boolean_t comp) | |
9bccf70c | 6290 | { |
2d21ac55 A |
6291 | mcache_waiter_inc(m_cache(class)); |
6292 | if (comp) { | |
6293 | if (class == MC_CL) { | |
6294 | mcache_waiter_inc(m_cache(MC_MBUF_CL)); | |
6295 | } else if (class == MC_BIGCL) { | |
6296 | mcache_waiter_inc(m_cache(MC_MBUF_BIGCL)); | |
6297 | } else if (class == MC_16KCL) { | |
6298 | mcache_waiter_inc(m_cache(MC_MBUF_16KCL)); | |
6299 | } else { | |
6300 | mcache_waiter_inc(m_cache(MC_MBUF_CL)); | |
6301 | mcache_waiter_inc(m_cache(MC_MBUF_BIGCL)); | |
6302 | } | |
6303 | } | |
9bccf70c A |
6304 | } |
6305 | ||
2d21ac55 A |
6306 | /* |
6307 | * Inform the corresponding mcache(s) that there's no more waiter below. | |
6308 | */ | |
6309 | static void | |
6310 | mbuf_waiter_dec(mbuf_class_t class, boolean_t comp) | |
6311 | { | |
6312 | mcache_waiter_dec(m_cache(class)); | |
6313 | if (comp) { | |
6314 | if (class == MC_CL) { | |
6315 | mcache_waiter_dec(m_cache(MC_MBUF_CL)); | |
6316 | } else if (class == MC_BIGCL) { | |
6317 | mcache_waiter_dec(m_cache(MC_MBUF_BIGCL)); | |
6318 | } else if (class == MC_16KCL) { | |
6319 | mcache_waiter_dec(m_cache(MC_MBUF_16KCL)); | |
6320 | } else { | |
6321 | mcache_waiter_dec(m_cache(MC_MBUF_CL)); | |
6322 | mcache_waiter_dec(m_cache(MC_MBUF_BIGCL)); | |
6323 | } | |
6324 | } | |
6325 | } | |
9bccf70c | 6326 | |
6d2010ae A |
6327 | /* |
6328 | * Called during slab (blocking and non-blocking) allocation. If there | |
6329 | * is at least one waiter and the time since the first waiter blocked | |
6330 | * exceeds the watchdog timeout, panic the system. | |
6331 | */ | |
6332 | static void | |
6333 | mbuf_watchdog(void) | |
6334 | { | |
6335 | struct timeval now; | |
6336 | unsigned int since; | |
6337 | ||
6338 | if (mb_waiters == 0 || !mb_watchdog) | |
6339 | return; | |
6340 | ||
6341 | microuptime(&now); | |
6342 | since = now.tv_sec - mb_wdtstart.tv_sec; | |
6343 | if (since >= MB_WDT_MAXTIME) { | |
6344 | panic_plain("%s: %d waiters stuck for %u secs\n%s", __func__, | |
6345 | mb_waiters, since, mbuf_dump()); | |
6346 | /* NOTREACHED */ | |
6347 | } | |
6348 | } | |
6349 | ||
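/*
 * Illustrative note (not part of the original source): the watchdog is
 * armed lazily.  mbuf_sleep() below records mb_wdtstart only when the
 * first waiter blocks; each later waiter re-checks the elapsed time
 * against MB_WDT_MAXTIME here, so an unblocked system pays nothing.
 */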
2d21ac55 A |
6350 | /* |
6351 | * Called during blocking allocation. Returns TRUE if one or more objects | |
6352 | * are available at the per-CPU cache layer, in which case the | |
6353 | * allocation should be retried at that level. | |
6354 | */ | |
6355 | static boolean_t | |
6356 | mbuf_sleep(mbuf_class_t class, unsigned int num, int wait) | |
9bccf70c | 6357 | { |
2d21ac55 A |
6358 | boolean_t mcache_retry = FALSE; |
6359 | ||
6360 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
6361 | ||
6362 | /* Check if there's anything at the cache layer */ | |
6363 | if (mbuf_cached_above(class, wait)) { | |
6364 | mcache_retry = TRUE; | |
6365 | goto done; | |
6366 | } | |
6367 | ||
6368 | /* Nothing? Then try hard to get it from somewhere */ | |
6369 | m_reclaim(class, num, (wait & MCR_COMP)); | |
6370 | ||
6371 | /* We tried hard and got something? */ | |
6372 | if (m_infree(class) > 0) { | |
6373 | mbstat.m_wait++; | |
6374 | goto done; | |
6375 | } else if (mbuf_cached_above(class, wait)) { | |
6376 | mbstat.m_wait++; | |
6377 | mcache_retry = TRUE; | |
6378 | goto done; | |
6379 | } else if (wait & MCR_TRYHARD) { | |
6380 | mcache_retry = TRUE; | |
6381 | goto done; | |
6382 | } | |
6383 | ||
6384 | /* | |
6385 | * There's really nothing for us right now; inform the | |
6386 | * cache(s) that there is a waiter below and go to sleep. | |
6387 | */ | |
6388 | mbuf_waiter_inc(class, (wait & MCR_COMP)); | |
6389 | ||
6390 | VERIFY(!(wait & MCR_NOSLEEP)); | |
6d2010ae A |
6391 | |
6392 | /* | |
6393 | * If this is the first waiter, arm the watchdog timer. Otherwise | |
6394 | * check if we need to panic the system due to watchdog timeout. | |
6395 | */ | |
6396 | if (mb_waiters == 0) | |
6397 | microuptime(&mb_wdtstart); | |
6398 | else | |
6399 | mbuf_watchdog(); | |
6400 | ||
2d21ac55 A |
6401 | mb_waiters++; |
6402 | (void) msleep(mb_waitchan, mbuf_mlock, (PZERO-1), m_cname(class), NULL); | |
6403 | ||
6404 | /* We are now up; stop getting notified until next round */ | |
6405 | mbuf_waiter_dec(class, (wait & MCR_COMP)); | |
6406 | ||
6407 | /* We waited and got something */ | |
6408 | if (m_infree(class) > 0) { | |
6409 | mbstat.m_wait++; | |
6410 | goto done; | |
6411 | } else if (mbuf_cached_above(class, wait)) { | |
6412 | mbstat.m_wait++; | |
6413 | mcache_retry = TRUE; | |
6414 | } | |
6415 | done: | |
6416 | return (mcache_retry); | |
9bccf70c A |
6417 | } |
6418 | ||
39037602 | 6419 | __attribute__((noreturn)) |
91447636 | 6420 | static void |
2d21ac55 | 6421 | mbuf_worker_thread(void) |
1c79356b | 6422 | { |
2d21ac55 A |
6423 | int mbuf_expand; |
6424 | ||
91447636 | 6425 | while (1) { |
2d21ac55 | 6426 | lck_mtx_lock(mbuf_mlock); |
2d21ac55 | 6427 | mbuf_expand = 0; |
91447636 A |
6428 | if (mbuf_expand_mcl) { |
6429 | int n; | |
2d21ac55 A |
6430 | |
6431 | /* Adjust to current number of clusters in use */ | |
6432 | n = mbuf_expand_mcl - | |
6433 | (m_total(MC_CL) - m_infree(MC_CL)); | |
6434 | if ((n + m_total(MC_CL)) > m_maxlimit(MC_CL)) | |
6435 | n = m_maxlimit(MC_CL) - m_total(MC_CL); | |
91447636 | 6436 | mbuf_expand_mcl = 0; |
2d21ac55 A |
6437 | |
6438 | if (n > 0 && freelist_populate(MC_CL, n, M_WAIT) > 0) | |
6439 | mbuf_expand++; | |
91447636 A |
6440 | } |
6441 | if (mbuf_expand_big) { | |
6442 | int n; | |
2d21ac55 A |
6443 | |
6444 | /* Adjust to current number of 4 KB clusters in use */ | |
6445 | n = mbuf_expand_big - | |
6446 | (m_total(MC_BIGCL) - m_infree(MC_BIGCL)); | |
6447 | if ((n + m_total(MC_BIGCL)) > m_maxlimit(MC_BIGCL)) | |
6448 | n = m_maxlimit(MC_BIGCL) - m_total(MC_BIGCL); | |
91447636 | 6449 | mbuf_expand_big = 0; |
2d21ac55 A |
6450 | |
6451 | if (n > 0 && freelist_populate(MC_BIGCL, n, M_WAIT) > 0) | |
6452 | mbuf_expand++; | |
6453 | } | |
6454 | if (mbuf_expand_16k) { | |
6455 | int n; | |
6456 | ||
6457 | /* Adjust to current number of 16 KB clusters in use */ | |
6458 | n = mbuf_expand_16k - | |
6459 | (m_total(MC_16KCL) - m_infree(MC_16KCL)); | |
6460 | if ((n + m_total(MC_16KCL)) > m_maxlimit(MC_16KCL)) | |
6461 | n = m_maxlimit(MC_16KCL) - m_total(MC_16KCL); | |
6462 | mbuf_expand_16k = 0; | |
6463 | ||
6464 | if (n > 0) | |
6465 | (void) freelist_populate(MC_16KCL, n, M_WAIT); | |
6466 | } | |
6467 | ||
6468 | /* | |
6469 | * Because we can run out of memory before filling the mbuf | |
6470 | * map, we should not allocate more clusters than there are | |
6471 | * mbufs -- otherwise we could have a large number of useless | |
6472 | * clusters allocated. | |
91447636 | 6473 | */ |
2d21ac55 A |
6474 | if (mbuf_expand) { |
6475 | while (m_total(MC_MBUF) < | |
6476 | (m_total(MC_BIGCL) + m_total(MC_CL))) { | |
6477 | if (freelist_populate(MC_MBUF, 1, M_WAIT) == 0) | |
6478 | break; | |
6479 | } | |
91447636 | 6480 | } |
2d21ac55 | 6481 | |
39037602 A |
6482 | mbuf_worker_needs_wakeup = TRUE; |
6483 | assert_wait((caddr_t)&mbuf_worker_needs_wakeup, | |
6484 | THREAD_UNINT); | |
2d21ac55 | 6485 | lck_mtx_unlock(mbuf_mlock); |
2d21ac55 | 6486 | (void) thread_block((thread_continue_t)mbuf_worker_thread); |
91447636 | 6487 | } |
1c79356b A |
6488 | } |
6489 | ||
39037602 | 6490 | __attribute__((noreturn)) |
91447636 | 6491 | static void |
2d21ac55 | 6492 | mbuf_worker_thread_init(void) |
55e303ae | 6493 | { |
2d21ac55 A |
6494 | mbuf_worker_ready++; |
6495 | mbuf_worker_thread(); | |
55e303ae | 6496 | } |
1c79356b | 6497 | |
2d21ac55 A |
6498 | static mcl_slab_t * |
6499 | slab_get(void *buf) | |
6500 | { | |
6501 | mcl_slabg_t *slg; | |
6502 | unsigned int ix, k; | |
6503 | ||
6504 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
6505 | ||
6506 | VERIFY(MBUF_IN_MAP(buf)); | |
3e170ce0 | 6507 | ix = ((unsigned char *)buf - mbutl) >> MBSHIFT; |
2d21ac55 A |
6508 | VERIFY(ix < maxslabgrp); |
6509 | ||
6510 | if ((slg = slabstbl[ix]) == NULL) { | |
6511 | /* | |
39037602 | 6512 | * In the current implementation, we never shrink the slabs |
fe8ab488 A |
6513 | * table; if we attempt to reallocate a cluster group when |
6514 | * it's already allocated, panic since this is a sign of | |
6515 | * memory corruption (slabstbl[ix] got nullified). | |
2d21ac55 A |
6516 | */ |
6517 | ++slabgrp; | |
6518 | VERIFY(ix < slabgrp); | |
6519 | /* | |
6520 | * Slabs expansion can only be done single threaded; when | |
6521 | * we get here, it must be as a result of m_clalloc() which | |
6522 | * is serialized and therefore mb_clalloc_busy must be set. | |
6523 | */ | |
6524 | VERIFY(mb_clalloc_busy); | |
6525 | lck_mtx_unlock(mbuf_mlock); | |
6526 | ||
6527 | /* This is a new buffer; create the slabs group for it */ | |
6528 | MALLOC(slg, mcl_slabg_t *, sizeof (*slg), M_TEMP, | |
6529 | M_WAITOK | M_ZERO); | |
3e170ce0 A |
6530 | MALLOC(slg->slg_slab, mcl_slab_t *, sizeof(mcl_slab_t) * NSLABSPMB, |
6531 | M_TEMP, M_WAITOK | M_ZERO); | |
6532 | VERIFY(slg != NULL && slg->slg_slab != NULL); | |
2d21ac55 A |
6533 | |
6534 | lck_mtx_lock(mbuf_mlock); | |
6535 | /* | |
6536 | * No other thread could have gone into m_clalloc() after | |
6537 | * we dropped the lock above, so verify that it's true. | |
6538 | */ | |
6539 | VERIFY(mb_clalloc_busy); | |
6540 | ||
6541 | slabstbl[ix] = slg; | |
6542 | ||
6543 | /* Chain each slab in the group to its forward neighbor */ | |
6544 | for (k = 1; k < NSLABSPMB; k++) | |
6545 | slg->slg_slab[k - 1].sl_next = &slg->slg_slab[k]; | |
6546 | VERIFY(slg->slg_slab[NSLABSPMB - 1].sl_next == NULL); | |
6547 | ||
6548 | /* And chain the last slab in the previous group to this */ | |
6549 | if (ix > 0) { | |
6550 | VERIFY(slabstbl[ix - 1]-> | |
6551 | slg_slab[NSLABSPMB - 1].sl_next == NULL); | |
6552 | slabstbl[ix - 1]->slg_slab[NSLABSPMB - 1].sl_next = | |
6553 | &slg->slg_slab[0]; | |
6554 | } | |
6555 | } | |
6556 | ||
3e170ce0 | 6557 | ix = MTOPG(buf) % NSLABSPMB; |
2d21ac55 A |
6558 | VERIFY(ix < NSLABSPMB); |
6559 | ||
6560 | return (&slg->slg_slab[ix]); | |
6561 | } | |
6562 | ||
6563 | static void | |
6564 | slab_init(mcl_slab_t *sp, mbuf_class_t class, u_int32_t flags, | |
6565 | void *base, void *head, unsigned int len, int refcnt, int chunks) | |
6566 | { | |
6567 | sp->sl_class = class; | |
6568 | sp->sl_flags = flags; | |
6569 | sp->sl_base = base; | |
6570 | sp->sl_head = head; | |
6571 | sp->sl_len = len; | |
6572 | sp->sl_refcnt = refcnt; | |
6573 | sp->sl_chunks = chunks; | |
6574 | slab_detach(sp); | |
6575 | } | |
6576 | ||
6577 | static void | |
6578 | slab_insert(mcl_slab_t *sp, mbuf_class_t class) | |
6579 | { | |
6580 | VERIFY(slab_is_detached(sp)); | |
6581 | m_slab_cnt(class)++; | |
6582 | TAILQ_INSERT_TAIL(&m_slablist(class), sp, sl_link); | |
6583 | sp->sl_flags &= ~SLF_DETACHED; | |
3e170ce0 A |
6584 | |
6585 | /* | |
6586 | * If a buffer spans multiple contiguous pages, clear the | |
6587 | * detached state on the slabs for those pages as well | |
6588 | */ | |
6d2010ae | 6589 | if (class == MC_16KCL) { |
2d21ac55 | 6590 | int k; |
6d2010ae | 6591 | for (k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
6592 | sp = sp->sl_next; |
6593 | /* Next slab must already be present */ | |
3e170ce0 | 6594 | VERIFY(sp != NULL && slab_is_detached(sp)); |
2d21ac55 A |
6595 | sp->sl_flags &= ~SLF_DETACHED; |
6596 | } | |
6597 | } | |
6598 | } | |
6599 | ||
6600 | static void | |
6601 | slab_remove(mcl_slab_t *sp, mbuf_class_t class) | |
6602 | { | |
3e170ce0 | 6603 | int k; |
2d21ac55 A |
6604 | VERIFY(!slab_is_detached(sp)); |
6605 | VERIFY(m_slab_cnt(class) > 0); | |
6606 | m_slab_cnt(class)--; | |
6607 | TAILQ_REMOVE(&m_slablist(class), sp, sl_link); | |
6608 | slab_detach(sp); | |
6d2010ae | 6609 | if (class == MC_16KCL) { |
6d2010ae | 6610 | for (k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
6611 | sp = sp->sl_next; |
6612 | /* Next slab must already be present */ | |
6613 | VERIFY(sp != NULL); | |
6614 | VERIFY(!slab_is_detached(sp)); | |
6615 | slab_detach(sp); | |
6616 | } | |
6617 | } | |
6618 | } | |
6619 | ||
6620 | static boolean_t | |
6621 | slab_inrange(mcl_slab_t *sp, void *buf) | |
6622 | { | |
6623 | return ((uintptr_t)buf >= (uintptr_t)sp->sl_base && | |
6624 | (uintptr_t)buf < ((uintptr_t)sp->sl_base + sp->sl_len)); | |
6625 | } | |
6626 | ||
b0d623f7 | 6627 | #undef panic |
2d21ac55 A |
6628 | |
6629 | static void | |
6630 | slab_nextptr_panic(mcl_slab_t *sp, void *addr) | |
6631 | { | |
6632 | int i; | |
6633 | unsigned int chunk_len = sp->sl_len / sp->sl_chunks; | |
6634 | uintptr_t buf = (uintptr_t)sp->sl_base; | |
6635 | ||
6636 | for (i = 0; i < sp->sl_chunks; i++, buf += chunk_len) { | |
6637 | void *next = ((mcache_obj_t *)buf)->obj_next; | |
6638 | if (next != addr) | |
6639 | continue; | |
6d2010ae | 6640 | if (!mclverify) { |
2d21ac55 A |
6641 | if (next != NULL && !MBUF_IN_MAP(next)) { |
6642 | mcache_t *cp = m_cache(sp->sl_class); | |
6643 | panic("%s: %s buffer %p in slab %p modified " | |
6644 | "after free at offset 0: %p out of range " | |
6645 | "[%p-%p)\n", __func__, cp->mc_name, | |
6646 | (void *)buf, sp, next, mbutl, embutl); | |
6647 | /* NOTREACHED */ | |
6648 | } | |
6649 | } else { | |
6650 | mcache_audit_t *mca = mcl_audit_buf2mca(sp->sl_class, | |
6651 | (mcache_obj_t *)buf); | |
6652 | mcl_audit_verify_nextptr(next, mca); | |
6653 | } | |
6654 | } | |
6655 | } | |
6656 | ||
6657 | static void | |
6658 | slab_detach(mcl_slab_t *sp) | |
6659 | { | |
6660 | sp->sl_link.tqe_next = (mcl_slab_t *)-1; | |
6661 | sp->sl_link.tqe_prev = (mcl_slab_t **)-1; | |
6662 | sp->sl_flags |= SLF_DETACHED; | |
6663 | } | |
6664 | ||
6665 | static boolean_t | |
6666 | slab_is_detached(mcl_slab_t *sp) | |
6667 | { | |
6668 | return ((intptr_t)sp->sl_link.tqe_next == -1 && | |
6669 | (intptr_t)sp->sl_link.tqe_prev == -1 && | |
6670 | (sp->sl_flags & SLF_DETACHED)); | |
6671 | } | |
6672 | ||
6673 | static void | |
6674 | mcl_audit_init(void *buf, mcache_audit_t **mca_list, | |
6675 | mcache_obj_t **con_list, size_t con_size, unsigned int num) | |
6676 | { | |
6677 | mcache_audit_t *mca, *mca_tail; | |
6678 | mcache_obj_t *con = NULL; | |
6679 | boolean_t save_contents = (con_list != NULL); | |
6680 | unsigned int i, ix; | |
6681 | ||
3e170ce0 | 6682 | ASSERT(num <= NMBPG); |
2d21ac55 A |
6683 | ASSERT(con_list == NULL || con_size != 0); |
6684 | ||
3e170ce0 | 6685 | ix = MTOPG(buf); |
6d2010ae A |
6686 | VERIFY(ix < maxclaudit); |
6687 | ||
2d21ac55 | 6688 | /* Make sure we haven't been here before */ |
3e170ce0 | 6689 | for (i = 0; i < NMBPG; i++) |
2d21ac55 A |
6690 | VERIFY(mclaudit[ix].cl_audit[i] == NULL); |
6691 | ||
6692 | mca = mca_tail = *mca_list; | |
6693 | if (save_contents) | |
6694 | con = *con_list; | |
6695 | ||
6696 | for (i = 0; i < num; i++) { | |
6697 | mcache_audit_t *next; | |
6698 | ||
6699 | next = mca->mca_next; | |
6700 | bzero(mca, sizeof (*mca)); | |
6701 | mca->mca_next = next; | |
6702 | mclaudit[ix].cl_audit[i] = mca; | |
6703 | ||
6704 | /* Attach the contents buffer if requested */ | |
6705 | if (save_contents) { | |
39236c6e A |
6706 | mcl_saved_contents_t *msc = |
6707 | (mcl_saved_contents_t *)(void *)con; | |
6708 | ||
6709 | VERIFY(msc != NULL); | |
6710 | VERIFY(IS_P2ALIGNED(msc, sizeof (u_int64_t))); | |
6711 | VERIFY(con_size == sizeof (*msc)); | |
2d21ac55 | 6712 | mca->mca_contents_size = con_size; |
39236c6e | 6713 | mca->mca_contents = msc; |
2d21ac55 A |
6714 | con = con->obj_next; |
6715 | bzero(mca->mca_contents, mca->mca_contents_size); | |
6716 | } | |
6717 | ||
6718 | mca_tail = mca; | |
6719 | mca = mca->mca_next; | |
6720 | } | |
91447636 | 6721 | |
2d21ac55 A |
6722 | if (save_contents) |
6723 | *con_list = con; | |
6724 | ||
6725 | *mca_list = mca_tail->mca_next; | |
6726 | mca_tail->mca_next = NULL; | |
6727 | } | |
6728 | ||
fe8ab488 A |
6729 | static void |
6730 | mcl_audit_free(void *buf, unsigned int num) | |
6731 | { | |
6732 | unsigned int i, ix; | |
6733 | mcache_audit_t *mca, *mca_list; | |
6734 | ||
3e170ce0 | 6735 | ix = MTOPG(buf); |
fe8ab488 | 6736 | VERIFY(ix < maxclaudit); |
39037602 | 6737 | |
fe8ab488 A |
6738 | if (mclaudit[ix].cl_audit[0] != NULL) { |
6739 | mca_list = mclaudit[ix].cl_audit[0]; | |
6740 | for (i = 0; i < num; i++) { | |
6741 | mca = mclaudit[ix].cl_audit[i]; | |
6742 | mclaudit[ix].cl_audit[i] = NULL; | |
6743 | if (mca->mca_contents) | |
6744 | mcache_free(mcl_audit_con_cache, | |
6745 | mca->mca_contents); | |
6746 | } | |
6747 | mcache_free_ext(mcache_audit_cache, | |
6748 | (mcache_obj_t *)mca_list); | |
6749 | } | |
6750 | } | |
6751 | ||
2d21ac55 | 6752 | /* |
6d2010ae | 6753 | * Given an address of a buffer (mbuf/2KB/4KB/16KB), return |
2d21ac55 A |
6754 | * the corresponding audit structure for that buffer. |
6755 | */ | |
6756 | static mcache_audit_t * | |
3e170ce0 | 6757 | mcl_audit_buf2mca(mbuf_class_t class, mcache_obj_t *mobj) |
2d21ac55 A |
6758 | { |
6759 | mcache_audit_t *mca = NULL; | |
3e170ce0 A |
6760 | int ix = MTOPG(mobj), m_idx = 0; |
6761 | unsigned char *page_addr; | |
2d21ac55 | 6762 | |
6d2010ae | 6763 | VERIFY(ix < maxclaudit); |
3e170ce0 A |
6764 | VERIFY(IS_P2ALIGNED(mobj, MIN(m_maxsize(class), PAGE_SIZE))); |
6765 | ||
6766 | page_addr = PGTOM(ix); | |
2d21ac55 A |
6767 | |
6768 | switch (class) { | |
6769 | case MC_MBUF: | |
6770 | /* | |
6d2010ae | 6771 | * For the mbuf case, find the index of the page |
2d21ac55 | 6772 | * used by the mbuf and use that index to locate the |
6d2010ae A |
6773 | * base address of the page. Then find out the |
6774 | * mbuf index relative to the page base and use | |
2d21ac55 A |
6775 | * it to locate the audit structure. |
6776 | */ | |
3e170ce0 A |
6777 | m_idx = MBPAGEIDX(page_addr, mobj); |
6778 | VERIFY(m_idx < (int)NMBPG); | |
6779 | mca = mclaudit[ix].cl_audit[m_idx]; | |
2d21ac55 A |
6780 | break; |
6781 | ||
6782 | case MC_CL: | |
6d2010ae A |
6783 | /* |
6784 | * Same thing as above, but for 2KB clusters in a page. | |
6785 | */ | |
3e170ce0 A |
6786 | m_idx = CLPAGEIDX(page_addr, mobj); |
6787 | VERIFY(m_idx < (int)NCLPG); | |
6788 | mca = mclaudit[ix].cl_audit[m_idx]; | |
6d2010ae A |
6789 | break; |
6790 | ||
2d21ac55 | 6791 | case MC_BIGCL: |
3e170ce0 A |
6792 | m_idx = BCLPAGEIDX(page_addr, mobj); |
6793 | VERIFY(m_idx < (int)NBCLPG); | |
6794 | mca = mclaudit[ix].cl_audit[m_idx]; | |
6795 | break; | |
2d21ac55 A |
6796 | case MC_16KCL: |
6797 | /* | |
6798 | * Same as above, but only return the first element. | |
6799 | */ | |
6800 | mca = mclaudit[ix].cl_audit[0]; | |
6801 | break; | |
6802 | ||
6803 | default: | |
6804 | VERIFY(0); | |
6805 | /* NOTREACHED */ | |
6806 | } | |
6807 | ||
6808 | return (mca); | |
6809 | } | |
6810 | ||
6811 | static void | |
6812 | mcl_audit_mbuf(mcache_audit_t *mca, void *addr, boolean_t composite, | |
6813 | boolean_t alloc) | |
6814 | { | |
6815 | struct mbuf *m = addr; | |
6816 | mcache_obj_t *next = ((mcache_obj_t *)m)->obj_next; | |
6817 | ||
6818 | VERIFY(mca->mca_contents != NULL && | |
6819 | mca->mca_contents_size == AUDIT_CONTENTS_SIZE); | |
6820 | ||
6d2010ae A |
6821 | if (mclverify) |
6822 | mcl_audit_verify_nextptr(next, mca); | |
2d21ac55 A |
6823 | |
6824 | if (!alloc) { | |
6825 | /* Save constructed mbuf fields */ | |
6826 | mcl_audit_save_mbuf(m, mca); | |
6d2010ae A |
6827 | if (mclverify) { |
6828 | mcache_set_pattern(MCACHE_FREE_PATTERN, m, | |
6829 | m_maxsize(MC_MBUF)); | |
6830 | } | |
2d21ac55 A |
6831 | ((mcache_obj_t *)m)->obj_next = next; |
6832 | return; | |
6833 | } | |
6834 | ||
6835 | /* Check if the buffer has been corrupted while in freelist */ | |
6d2010ae A |
6836 | if (mclverify) { |
6837 | mcache_audit_free_verify_set(mca, addr, 0, m_maxsize(MC_MBUF)); | |
6838 | } | |
2d21ac55 A |
6839 | /* Restore constructed mbuf fields */ |
6840 | mcl_audit_restore_mbuf(m, mca, composite); | |
6841 | } | |
6842 | ||
6843 | static void | |
6844 | mcl_audit_restore_mbuf(struct mbuf *m, mcache_audit_t *mca, boolean_t composite) | |
6845 | { | |
39236c6e | 6846 | struct mbuf *ms = MCA_SAVED_MBUF_PTR(mca); |
2d21ac55 A |
6847 | |
6848 | if (composite) { | |
6849 | struct mbuf *next = m->m_next; | |
813fb2f6 | 6850 | VERIFY(ms->m_flags == M_EXT && m_get_rfa(ms) != NULL && |
2d21ac55 | 6851 | MBUF_IS_COMPOSITE(ms)); |
39236c6e | 6852 | VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE); |
2d21ac55 A |
6853 | /* |
6854 | * We could have hand-picked the mbuf fields and restored | |
6855 | * them individually, but that would be a maintenance | |
6856 | * headache. Instead, restore everything that was saved; | |
6857 | * the mbuf layer will recheck and reinitialize anyway. | |
6858 | */ | |
39236c6e | 6859 | bcopy(ms, m, MCA_SAVED_MBUF_SIZE); |
2d21ac55 A |
6860 | m->m_next = next; |
6861 | } else { | |
6862 | /* | |
6863 | * For a regular mbuf (no cluster attached) there's nothing | |
6864 | * to restore other than the type field, which is expected | |
6865 | * to be MT_FREE. | |
6866 | */ | |
6867 | m->m_type = ms->m_type; | |
6868 | } | |
6869 | _MCHECK(m); | |
6870 | } | |
6871 | ||
6872 | static void | |
6873 | mcl_audit_save_mbuf(struct mbuf *m, mcache_audit_t *mca) | |
6874 | { | |
39236c6e | 6875 | VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE); |
2d21ac55 | 6876 | _MCHECK(m); |
39236c6e | 6877 | bcopy(m, MCA_SAVED_MBUF_PTR(mca), MCA_SAVED_MBUF_SIZE); |
2d21ac55 A |
6878 | } |
6879 | ||
6880 | static void | |
6881 | mcl_audit_cluster(mcache_audit_t *mca, void *addr, size_t size, boolean_t alloc, | |
6882 | boolean_t save_next) | |
6883 | { | |
6884 | mcache_obj_t *next = ((mcache_obj_t *)addr)->obj_next; | |
6885 | ||
6886 | if (!alloc) { | |
6d2010ae A |
6887 | if (mclverify) { |
6888 | mcache_set_pattern(MCACHE_FREE_PATTERN, addr, size); | |
6889 | } | |
2d21ac55 A |
6890 | if (save_next) { |
6891 | mcl_audit_verify_nextptr(next, mca); | |
6892 | ((mcache_obj_t *)addr)->obj_next = next; | |
6893 | } | |
6d2010ae | 6894 | } else if (mclverify) { |
2d21ac55 A |
6895 | /* Check if the buffer has been corrupted while in freelist */ |
6896 | mcl_audit_verify_nextptr(next, mca); | |
6897 | mcache_audit_free_verify_set(mca, addr, 0, size); | |
6898 | } | |
6899 | } | |
6900 | ||
39236c6e A |
6901 | static void |
6902 | mcl_audit_scratch(mcache_audit_t *mca) | |
6903 | { | |
6904 | void *stack[MCACHE_STACK_DEPTH + 1]; | |
6905 | mcl_scratch_audit_t *msa; | |
6906 | struct timeval now; | |
6907 | ||
6908 | VERIFY(mca->mca_contents != NULL); | |
6909 | msa = MCA_SAVED_SCRATCH_PTR(mca); | |
6910 | ||
6911 | msa->msa_pthread = msa->msa_thread; | |
6912 | msa->msa_thread = current_thread(); | |
6913 | bcopy(msa->msa_stack, msa->msa_pstack, sizeof (msa->msa_pstack)); | |
6914 | msa->msa_pdepth = msa->msa_depth; | |
6915 | bzero(stack, sizeof (stack)); | |
6916 | msa->msa_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1; | |
fe8ab488 | 6917 | bcopy(&stack[1], msa->msa_stack, sizeof (msa->msa_stack)); |
39236c6e A |
6918 | |
6919 | msa->msa_ptstamp = msa->msa_tstamp; | |
6920 | microuptime(&now); | |
6921 | /* tstamp is in ms relative to mb_start */ | |
6922 | msa->msa_tstamp = ((now.tv_usec - mb_start.tv_usec) / 1000); | |
6923 | if ((now.tv_sec - mb_start.tv_sec) > 0) | |
6924 | msa->msa_tstamp += ((now.tv_sec - mb_start.tv_sec) * 1000); | |
6925 | } | |
6926 | ||
2d21ac55 A |
6927 | static void |
6928 | mcl_audit_mcheck_panic(struct mbuf *m) | |
6929 | { | |
6930 | mcache_audit_t *mca; | |
6931 | ||
6932 | MRANGE(m); | |
6933 | mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); | |
6934 | ||
6935 | panic("mcl_audit: freed mbuf %p with type 0x%x (instead of 0x%x)\n%s\n", | |
6936 | m, (u_int16_t)m->m_type, MT_FREE, mcache_dump_mca(mca)); | |
6937 | /* NOTREACHED */ | |
6938 | } | |
6939 | ||
6940 | static void | |
6941 | mcl_audit_verify_nextptr(void *next, mcache_audit_t *mca) | |
6942 | { | |
6d2010ae A |
6943 | if (next != NULL && !MBUF_IN_MAP(next) && |
6944 | (next != (void *)MCACHE_FREE_PATTERN || !mclverify)) { | |
2d21ac55 A |
6945 | panic("mcl_audit: buffer %p modified after free at offset 0: " |
6946 | "%p out of range [%p-%p)\n%s\n", | |
6947 | mca->mca_addr, next, mbutl, embutl, mcache_dump_mca(mca)); | |
6948 | /* NOTREACHED */ | |
6949 | } | |
6950 | } | |
6951 | ||
6d2010ae A |
6952 | /* This function turns on mbuf leak detection */ |
6953 | static void | |
6954 | mleak_activate(void) | |
6955 | { | |
6956 | mleak_table.mleak_sample_factor = MLEAK_SAMPLE_FACTOR; | |
6957 | PE_parse_boot_argn("mleak_sample_factor", | |
6958 | &mleak_table.mleak_sample_factor, | |
6959 | sizeof (mleak_table.mleak_sample_factor)); | |
6960 | ||
6961 | if (mleak_table.mleak_sample_factor == 0) | |
6962 | mclfindleak = 0; | |
6963 | ||
6964 | if (mclfindleak == 0) | |
6965 | return; | |
6966 | ||
6967 | vm_size_t alloc_size = | |
6968 | mleak_alloc_buckets * sizeof (struct mallocation); | |
6969 | vm_size_t trace_size = mleak_trace_buckets * sizeof (struct mtrace); | |
6970 | ||
6971 | MALLOC(mleak_allocations, struct mallocation *, alloc_size, | |
6972 | M_TEMP, M_WAITOK | M_ZERO); | |
6973 | VERIFY(mleak_allocations != NULL); | |
6974 | ||
6975 | MALLOC(mleak_traces, struct mtrace *, trace_size, | |
6976 | M_TEMP, M_WAITOK | M_ZERO); | |
6977 | VERIFY(mleak_traces != NULL); | |
6978 | ||
6979 | MALLOC(mleak_stat, mleak_stat_t *, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES), | |
6980 | M_TEMP, M_WAITOK | M_ZERO); | |
6981 | VERIFY(mleak_stat != NULL); | |
6982 | mleak_stat->ml_cnt = MLEAK_NUM_TRACES; | |
6983 | #ifdef __LP64__ | |
6984 | mleak_stat->ml_isaddr64 = 1; | |
6985 | #endif /* __LP64__ */ | |
6986 | } | |
6987 | ||
6988 | static void | |
6989 | mleak_logger(u_int32_t num, mcache_obj_t *addr, boolean_t alloc) | |
6990 | { | |
6991 | int temp; | |
6992 | ||
6993 | if (mclfindleak == 0) | |
6994 | return; | |
6995 | ||
6996 | if (!alloc) | |
6997 | return (mleak_free(addr)); | |
6998 | ||
6999 | temp = atomic_add_32_ov(&mleak_table.mleak_capture, 1); | |
7000 | ||
7001 | if ((temp % mleak_table.mleak_sample_factor) == 0 && addr != NULL) { | |
7002 | uintptr_t bt[MLEAK_STACK_DEPTH]; | |
39037602 | 7003 | int logged = backtrace(bt, MLEAK_STACK_DEPTH); |
6d2010ae A |
7004 | mleak_log(bt, addr, logged, num); |
7005 | } | |
7006 | } | |
7007 | ||
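/*
 * Illustrative sketch (not part of the original source): the sampling
 * in mleak_logger() above is a modulo filter over a shared atomic
 * counter, so only one in every mleak_sample_factor allocations pays
 * for a backtrace.  The same idea in isolation:
 */
static uint32_t example_counter;

static boolean_t
example_should_sample(uint32_t factor)
{
	/* Records roughly 1 in every "factor" events */
	return ((atomic_add_32_ov(&example_counter, 1) % factor) == 0);
}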
7008 | /* | |
7009 | * This function records the allocation in the mleak_allocations table | |
7010 | * and the backtrace in the mleak_traces table. If the allocation slot | |
7011 | * is in use, replace the old allocation with the new one; if the trace | |
7012 | * slot is in use, return (or increment the refcount if it is the same trace). | |
7013 | */ | |
7014 | static boolean_t | |
7015 | mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num) | |
7016 | { | |
7017 | struct mallocation *allocation; | |
7018 | struct mtrace *trace; | |
7019 | uint32_t trace_index; | |
6d2010ae A |
7020 | |
7021 | /* Quit if someone else modifying the tables */ | |
7022 | if (!lck_mtx_try_lock_spin(mleak_lock)) { | |
7023 | mleak_table.total_conflicts++; | |
7024 | return (FALSE); | |
7025 | } | |
7026 | ||
7027 | allocation = &mleak_allocations[hashaddr((uintptr_t)addr, | |
7028 | mleak_alloc_buckets)]; | |
7029 | trace_index = hashbacktrace(bt, depth, mleak_trace_buckets); | |
7030 | trace = &mleak_traces[trace_index]; | |
7031 | ||
7032 | VERIFY(allocation <= &mleak_allocations[mleak_alloc_buckets - 1]); | |
7033 | VERIFY(trace <= &mleak_traces[mleak_trace_buckets - 1]); | |
7034 | ||
7035 | allocation->hitcount++; | |
7036 | trace->hitcount++; | |
7037 | ||
7038 | /* | |
7039 | * If the allocation bucket we want is occupied | |
7040 | * and the occupier has the same trace, just bail. | |
7041 | */ | |
7042 | if (allocation->element != NULL && | |
7043 | trace_index == allocation->trace_index) { | |
7044 | mleak_table.alloc_collisions++; | |
7045 | lck_mtx_unlock(mleak_lock); | |
7046 | return (TRUE); | |
7047 | } | |
7048 | ||
7049 | /* | |
7050 | * Store the backtrace in the traces array; | |
7051 | * Size of zero = trace bucket is free. | |
7052 | */ | |
7053 | if (trace->allocs > 0 && | |
7054 | bcmp(trace->addr, bt, (depth * sizeof (uintptr_t))) != 0) { | |
7055 | /* Different, unique trace, but the same hash! Bail out. */ | |
7056 | trace->collisions++; | |
7057 | mleak_table.trace_collisions++; | |
7058 | lck_mtx_unlock(mleak_lock); | |
7059 | return (TRUE); | |
7060 | } else if (trace->allocs > 0) { | |
7061 | /* Same trace, already added, so increment refcount */ | |
7062 | trace->allocs++; | |
7063 | } else { | |
7064 | /* Found an unused trace bucket, so record the trace here */ | |
7065 | if (trace->depth != 0) { | |
7066 | /* this slot was previously used but is not currently in use */ | |
7067 | mleak_table.trace_overwrites++; | |
7068 | } | |
7069 | mleak_table.trace_recorded++; | |
7070 | trace->allocs = 1; | |
7071 | memcpy(trace->addr, bt, (depth * sizeof (uintptr_t))); | |
7072 | trace->depth = depth; | |
7073 | trace->collisions = 0; | |
7074 | } | |
7075 | ||
7076 | /* Step 2: Store the allocation record in the allocations array */ | |
7077 | if (allocation->element != NULL) { | |
7078 | /* | |
7079 | * Replace an existing allocation. No need to preserve | |
7080 | * because only a subset of the allocations are being | |
7081 | * recorded anyway. | |
7082 | */ | |
7083 | mleak_table.alloc_collisions++; | |
7084 | } else if (allocation->trace_index != 0) { | |
7085 | mleak_table.alloc_overwrites++; | |
7086 | } | |
7087 | allocation->element = addr; | |
7088 | allocation->trace_index = trace_index; | |
7089 | allocation->count = num; | |
7090 | mleak_table.alloc_recorded++; | |
7091 | mleak_table.outstanding_allocs++; | |
7092 | ||
6d2010ae A |
7093 | lck_mtx_unlock(mleak_lock); |
7094 | return (TRUE); | |
7095 | } | |
7096 | ||
7097 | static void | |
7098 | mleak_free(mcache_obj_t *addr) | |
7099 | { | |
7100 | while (addr != NULL) { | |
7101 | struct mallocation *allocation = &mleak_allocations | |
7102 | [hashaddr((uintptr_t)addr, mleak_alloc_buckets)]; | |
7103 | ||
7104 | if (allocation->element == addr && | |
7105 | allocation->trace_index < mleak_trace_buckets) { | |
7106 | lck_mtx_lock_spin(mleak_lock); | |
7107 | if (allocation->element == addr && | |
7108 | allocation->trace_index < mleak_trace_buckets) { | |
7109 | struct mtrace *trace; | |
7110 | trace = &mleak_traces[allocation->trace_index]; | |
7111 | /* allocs = 0 means trace bucket is unused */ | |
7112 | if (trace->allocs > 0) | |
7113 | trace->allocs--; | |
7114 | if (trace->allocs == 0) | |
7115 | trace->depth = 0; | |
7116 | /* NULL element means alloc bucket is unused */ | |
7117 | allocation->element = NULL; | |
7118 | mleak_table.outstanding_allocs--; | |
7119 | } | |
7120 | lck_mtx_unlock(mleak_lock); | |
7121 | } | |
7122 | addr = addr->obj_next; | |
7123 | } | |
7124 | } | |
7125 | ||
316670eb A |
7126 | static void |
7127 | mleak_sort_traces(void) | |
7128 | { | |
7129 | int i, j, k; | |
7130 | struct mtrace *swap; | |
7131 | ||
7132 | for (i = 0; i < MLEAK_NUM_TRACES; i++) | |
7133 | mleak_top_trace[i] = NULL; | |
7134 | ||
7135 | for (i = 0, j = 0; j < MLEAK_NUM_TRACES && i < mleak_trace_buckets; i++) { | |
7137 | if (mleak_traces[i].allocs <= 0) | |
7138 | continue; | |
7139 | ||
7140 | mleak_top_trace[j] = &mleak_traces[i]; | |
7141 | for (k = j; k > 0; k--) { | |
7142 | if (mleak_top_trace[k]->allocs <= | |
7143 | mleak_top_trace[k-1]->allocs) | |
7144 | break; | |
7145 | ||
7146 | swap = mleak_top_trace[k-1]; | |
7147 | mleak_top_trace[k-1] = mleak_top_trace[k]; | |
7148 | mleak_top_trace[k] = swap; | |
7149 | } | |
7150 | j++; | |
7151 | } | |
7152 | ||
7153 | j--; | |
7154 | for (; i < mleak_trace_buckets; i++) { | |
7155 | if (mleak_traces[i].allocs <= mleak_top_trace[j]->allocs) | |
7156 | continue; | |
7157 | ||
7158 | mleak_top_trace[j] = &mleak_traces[i]; | |
7159 | ||
7160 | for (k = j; k > 0; k--) { | |
7161 | if (mleak_top_trace[k]->allocs <= | |
7162 | mleak_top_trace[k-1]->allocs) | |
7163 | break; | |
7164 | ||
7165 | swap = mleak_top_trace[k-1]; | |
7166 | mleak_top_trace[k-1] = mleak_top_trace[k]; | |
7167 | mleak_top_trace[k] = swap; | |
7168 | } | |
7169 | } | |
7170 | } | |
7171 | ||
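/*
 * Illustrative sketch (not part of the original source): the routine
 * above keeps only the MLEAK_NUM_TRACES busiest traces, in descending
 * order, by insertion into a small fixed array instead of sorting all
 * trace buckets.  The core step in isolation, assuming "top" is fully
 * populated with n non-NULL entries:
 */
static void
example_topn_insert(struct mtrace **top, int n, struct mtrace *cand)
{
	int k;
	struct mtrace *swap;

	if (cand->allocs <= top[n - 1]->allocs)
		return;			/* not among the top n */
	top[n - 1] = cand;
	for (k = n - 1; k > 0 && top[k]->allocs > top[k - 1]->allocs; k--) {
		swap = top[k - 1];
		top[k - 1] = top[k];
		top[k] = swap;
	}
}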
7172 | static void | |
7173 | mleak_update_stats(void) | |
7174 | { | |
7175 | mleak_trace_stat_t *mltr; | |
7176 | int i; | |
7177 | ||
7178 | VERIFY(mleak_stat != NULL); | |
7179 | #ifdef __LP64__ | |
7180 | VERIFY(mleak_stat->ml_isaddr64); | |
7181 | #else | |
7182 | VERIFY(!mleak_stat->ml_isaddr64); | |
7183 | #endif /* !__LP64__ */ | |
7184 | VERIFY(mleak_stat->ml_cnt == MLEAK_NUM_TRACES); | |
7185 | ||
7186 | mleak_sort_traces(); | |
7187 | ||
7188 | mltr = &mleak_stat->ml_trace[0]; | |
7189 | bzero(mltr, sizeof (*mltr) * MLEAK_NUM_TRACES); | |
7190 | for (i = 0; i < MLEAK_NUM_TRACES; i++) { | |
7191 | int j; | |
7192 | ||
7193 | if (mleak_top_trace[i] == NULL || | |
7194 | mleak_top_trace[i]->allocs == 0) | |
7195 | continue; | |
7196 | ||
7197 | mltr->mltr_collisions = mleak_top_trace[i]->collisions; | |
7198 | mltr->mltr_hitcount = mleak_top_trace[i]->hitcount; | |
7199 | mltr->mltr_allocs = mleak_top_trace[i]->allocs; | |
7200 | mltr->mltr_depth = mleak_top_trace[i]->depth; | |
7201 | ||
7202 | VERIFY(mltr->mltr_depth <= MLEAK_STACK_DEPTH); | |
7203 | for (j = 0; j < mltr->mltr_depth; j++) | |
7204 | mltr->mltr_addr[j] = mleak_top_trace[i]->addr[j]; | |
7205 | ||
7206 | mltr++; | |
7207 | } | |
7208 | } | |
7209 | ||
6d2010ae A |
7210 | static struct mbtypes { |
7211 | int mt_type; | |
7212 | const char *mt_name; | |
7213 | } mbtypes[] = { | |
7214 | { MT_DATA, "data" }, | |
7215 | { MT_OOBDATA, "oob data" }, | |
7216 | { MT_CONTROL, "ancillary data" }, | |
7217 | { MT_HEADER, "packet headers" }, | |
7218 | { MT_SOCKET, "socket structures" }, | |
7219 | { MT_PCB, "protocol control blocks" }, | |
7220 | { MT_RTABLE, "routing table entries" }, | |
7221 | { MT_HTABLE, "IMP host table entries" }, | |
7222 | { MT_ATABLE, "address resolution tables" }, | |
7223 | { MT_FTABLE, "fragment reassembly queue headers" }, | |
7224 | { MT_SONAME, "socket names and addresses" }, | |
7225 | { MT_SOOPTS, "socket options" }, | |
7226 | { MT_RIGHTS, "access rights" }, | |
7227 | { MT_IFADDR, "interface addresses" }, | |
7228 | { MT_TAG, "packet tags" }, | |
7229 | { 0, NULL } | |
7230 | }; | |
7231 | ||
7232 | #define MBUF_DUMP_BUF_CHK() { \ | |
7233 | clen -= k; \ | |
7234 | if (clen < 1) \ | |
7235 | goto done; \ | |
7236 | c += k; \ | |
7237 | } | |
7238 | ||
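/*
 * Illustrative note (not part of the original source): this macro is
 * the usual snprintf accumulation pattern.  Each snprintf() returns the
 * length it wanted to write; the macro charges that against the space
 * remaining in "clen", advances the cursor "c", and bails to "done"
 * once the buffer is effectively full -- including the truncation case,
 * where the return value exceeds the space left and drives clen
 * negative.
 */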
7239 | static char * | |
7240 | mbuf_dump(void) | |
7241 | { | |
7242 | unsigned long totmem = 0, totfree = 0, totmbufs, totused, totpct; | |
7243 | u_int32_t m_mbufs = 0, m_clfree = 0, m_bigclfree = 0; | |
7244 | u_int32_t m_mbufclfree = 0, m_mbufbigclfree = 0; | |
7245 | u_int32_t m_16kclusters = 0, m_16kclfree = 0, m_mbuf16kclfree = 0; | |
7246 | int nmbtypes = sizeof (mbstat.m_mtypes) / sizeof (short); | |
7247 | uint8_t seen[256]; | |
7248 | struct mbtypes *mp; | |
7249 | mb_class_stat_t *sp; | |
316670eb | 7250 | mleak_trace_stat_t *mltr; |
6d2010ae | 7251 | char *c = mbuf_dump_buf; |
316670eb | 7252 | int i, k, clen = MBUF_DUMP_BUF_SIZE; |
6d2010ae A |
7253 | |
7254 | mbuf_dump_buf[0] = '\0'; | |
7255 | ||
7256 | /* synchronize all statistics in the mbuf table */ | |
7257 | mbuf_stat_sync(); | |
7258 | mbuf_mtypes_sync(TRUE); | |
7259 | ||
7260 | sp = &mb_stat->mbs_class[0]; | |
7261 | for (i = 0; i < mb_stat->mbs_cnt; i++, sp++) { | |
7262 | u_int32_t mem; | |
7263 | ||
7264 | if (m_class(i) == MC_MBUF) { | |
7265 | m_mbufs = sp->mbcl_active; | |
7266 | } else if (m_class(i) == MC_CL) { | |
7267 | m_clfree = sp->mbcl_total - sp->mbcl_active; | |
7268 | } else if (m_class(i) == MC_BIGCL) { | |
7269 | m_bigclfree = sp->mbcl_total - sp->mbcl_active; | |
7270 | } else if (njcl > 0 && m_class(i) == MC_16KCL) { | |
7271 | m_16kclfree = sp->mbcl_total - sp->mbcl_active; | |
7272 | m_16kclusters = sp->mbcl_total; | |
7273 | } else if (m_class(i) == MC_MBUF_CL) { | |
7274 | m_mbufclfree = sp->mbcl_total - sp->mbcl_active; | |
7275 | } else if (m_class(i) == MC_MBUF_BIGCL) { | |
7276 | m_mbufbigclfree = sp->mbcl_total - sp->mbcl_active; | |
7277 | } else if (njcl > 0 && m_class(i) == MC_MBUF_16KCL) { | |
7278 | m_mbuf16kclfree = sp->mbcl_total - sp->mbcl_active; | |
7279 | } | |
7280 | ||
7281 | mem = sp->mbcl_ctotal * sp->mbcl_size; | |
7282 | totmem += mem; | |
7283 | totfree += (sp->mbcl_mc_cached + sp->mbcl_infree) * | |
7284 | sp->mbcl_size; | |
7285 | ||
7286 | } | |
7287 | ||
7288 | /* adjust free counts to include composite caches */ | |
7289 | m_clfree += m_mbufclfree; | |
7290 | m_bigclfree += m_mbufbigclfree; | |
7291 | m_16kclfree += m_mbuf16kclfree; | |
7292 | ||
7293 | totmbufs = 0; | |
7294 | for (mp = mbtypes; mp->mt_name != NULL; mp++) | |
7295 | totmbufs += mbstat.m_mtypes[mp->mt_type]; | |
7296 | if (totmbufs > m_mbufs) | |
7297 | totmbufs = m_mbufs; | |
7298 | k = snprintf(c, clen, "%lu/%u mbufs in use:\n", totmbufs, m_mbufs); | |
7299 | MBUF_DUMP_BUF_CHK(); | |
7300 | ||
7301 | bzero(&seen, sizeof (seen)); | |
7302 | for (mp = mbtypes; mp->mt_name != NULL; mp++) { | |
7303 | if (mbstat.m_mtypes[mp->mt_type] != 0) { | |
7304 | seen[mp->mt_type] = 1; | |
7305 | k = snprintf(c, clen, "\t%u mbufs allocated to %s\n", | |
7306 | mbstat.m_mtypes[mp->mt_type], mp->mt_name); | |
7307 | MBUF_DUMP_BUF_CHK(); | |
7308 | } | |
7309 | } | |
7310 | seen[MT_FREE] = 1; | |
7311 | for (i = 0; i < nmbtypes; i++) | |
7312 | if (!seen[i] && mbstat.m_mtypes[i] != 0) { | |
7313 | k = snprintf(c, clen, "\t%u mbufs allocated to " | |
7314 | "<mbuf type %d>\n", mbstat.m_mtypes[i], i); | |
7315 | MBUF_DUMP_BUF_CHK(); | |
7316 | } | |
7317 | if ((m_mbufs - totmbufs) > 0) { | |
7318 | k = snprintf(c, clen, "\t%lu mbufs allocated to caches\n", | |
7319 | m_mbufs - totmbufs); | |
7320 | MBUF_DUMP_BUF_CHK(); | |
7321 | } | |
7322 | k = snprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n" | |
7323 | "%u/%u mbuf 4KB clusters in use\n", | |
7324 | (unsigned int)(mbstat.m_clusters - m_clfree), | |
7325 | (unsigned int)mbstat.m_clusters, | |
7326 | (unsigned int)(mbstat.m_bigclusters - m_bigclfree), | |
7327 | (unsigned int)mbstat.m_bigclusters); | |
7328 | MBUF_DUMP_BUF_CHK(); | |
7329 | ||
7330 | if (njcl > 0) { | |
7331 | k = snprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n", | |
7332 | m_16kclusters - m_16kclfree, m_16kclusters, | |
7333 | njclbytes / 1024); | |
7334 | MBUF_DUMP_BUF_CHK(); | |
7335 | } | |
7336 | totused = totmem - totfree; | |
7337 | if (totmem == 0) { | |
7338 | totpct = 0; | |
7339 | } else if (totused < (ULONG_MAX / 100)) { | |
7340 | totpct = (totused * 100) / totmem; | |
7341 | } else { | |
7342 | u_long totmem1 = totmem / 100; | |
7343 | u_long totused1 = totused / 100; | |
7344 | totpct = (totused1 * 100) / totmem1; | |
7345 | } | |
7346 | k = snprintf(c, clen, "%lu KB allocated to network (approx. %lu%% " | |
7347 | "in use)\n", totmem / 1024, totpct); | |
7348 | MBUF_DUMP_BUF_CHK(); | |
7349 | ||
316670eb A |
7350 | /* mbuf leak detection statistics */ |
7351 | mleak_update_stats(); | |
7352 | ||
7353 | k = snprintf(c, clen, "\nmbuf leak detection table:\n"); | |
7354 | MBUF_DUMP_BUF_CHK(); | |
7355 | k = snprintf(c, clen, "\ttotal captured: %u (one per %u)\n", | |
7356 | mleak_table.mleak_capture / mleak_table.mleak_sample_factor, | |
7357 | mleak_table.mleak_sample_factor); | |
7358 | MBUF_DUMP_BUF_CHK(); | |
7359 | k = snprintf(c, clen, "\ttotal allocs outstanding: %llu\n", | |
7360 | mleak_table.outstanding_allocs); | |
7361 | MBUF_DUMP_BUF_CHK(); | |
7362 | k = snprintf(c, clen, "\tnew hash recorded: %llu allocs, %llu traces\n", | |
7363 | mleak_table.alloc_recorded, mleak_table.trace_recorded); | |
7364 | MBUF_DUMP_BUF_CHK(); | |
7365 | k = snprintf(c, clen, "\thash collisions: %llu allocs, %llu traces\n", | |
7366 | mleak_table.alloc_collisions, mleak_table.trace_collisions); | |
7367 | MBUF_DUMP_BUF_CHK(); | |
7368 | k = snprintf(c, clen, "\toverwrites: %llu allocs, %llu traces\n", | |
7369 | mleak_table.alloc_overwrites, mleak_table.trace_overwrites); | |
7370 | MBUF_DUMP_BUF_CHK(); | |
7371 | k = snprintf(c, clen, "\tlock conflicts: %llu\n\n", | |
7372 | mleak_table.total_conflicts); | |
7373 | MBUF_DUMP_BUF_CHK(); | |
7374 | ||
7375 | k = snprintf(c, clen, "top %d outstanding traces:\n", | |
7376 | mleak_stat->ml_cnt); | |
7377 | MBUF_DUMP_BUF_CHK(); | |
7378 | for (i = 0; i < mleak_stat->ml_cnt; i++) { | |
7379 | mltr = &mleak_stat->ml_trace[i]; | |
7380 | k = snprintf(c, clen, "[%d] %llu outstanding alloc(s), " | |
7381 | "%llu hit(s), %llu collision(s)\n", (i + 1), | |
7382 | mltr->mltr_allocs, mltr->mltr_hitcount, | |
7383 | mltr->mltr_collisions); | |
7384 | MBUF_DUMP_BUF_CHK(); | |
7385 | } | |
7386 | ||
7387 | if (mleak_stat->ml_isaddr64) | |
7388 | k = snprintf(c, clen, MB_LEAK_HDR_64); | |
7389 | else | |
7390 | k = snprintf(c, clen, MB_LEAK_HDR_32); | |
7391 | MBUF_DUMP_BUF_CHK(); | |
7392 | ||
7393 | for (i = 0; i < MLEAK_STACK_DEPTH; i++) { | |
7394 | int j; | |
7395 | k = snprintf(c, clen, "%2d: ", (i + 1)); | |
7396 | MBUF_DUMP_BUF_CHK(); | |
7397 | for (j = 0; j < mleak_stat->ml_cnt; j++) { | |
7398 | mltr = &mleak_stat->ml_trace[j]; | |
7399 | if (i < mltr->mltr_depth) { | |
7400 | if (mleak_stat->ml_isaddr64) { | |
7401 | k = snprintf(c, clen, "0x%0llx ", | |
fe8ab488 A |
7402 | (uint64_t)VM_KERNEL_UNSLIDE( |
7403 | mltr->mltr_addr[i])); | |
316670eb A |
7404 | } else { |
7405 | k = snprintf(c, clen, | |
7406 | "0x%08x ", | |
fe8ab488 A |
7407 | (uint32_t)VM_KERNEL_UNSLIDE( |
7408 | mltr->mltr_addr[i])); | |
316670eb A |
7409 | } |
7410 | } else { | |
7411 | if (mleak_stat->ml_isaddr64) | |
7412 | k = snprintf(c, clen, | |
7413 | MB_LEAK_SPACING_64); | |
7414 | else | |
7415 | k = snprintf(c, clen, | |
7416 | MB_LEAK_SPACING_32); | |
7417 | } | |
7418 | MBUF_DUMP_BUF_CHK(); | |
7419 | } | |
7420 | k = snprintf(c, clen, "\n"); | |
7421 | MBUF_DUMP_BUF_CHK(); | |
7422 | } | |
done:
	return (mbuf_dump_buf);
}

#undef MBUF_DUMP_BUF_CHK

/*
 * Convert between a regular and a packet header mbuf.  Caller is responsible
 * for setting or clearing M_PKTHDR; this routine does the rest of the work.
 */
int
m_reinit(struct mbuf *m, int hdr)
{
	int ret = 0;

	if (hdr) {
		VERIFY(!(m->m_flags & M_PKTHDR));
		if (!(m->m_flags & M_EXT) &&
		    (m->m_data != m->m_dat || m->m_len > 0)) {
			/*
			 * If there's no external cluster attached and the
			 * mbuf appears to contain user data, we cannot
			 * safely convert this to a packet header mbuf,
			 * as the packet header structure might overlap
			 * with the data.
			 */
			printf("%s: cannot set M_PKTHDR on altered mbuf %llx, "
			    "m_data %llx (expected %llx), "
			    "m_len %d (expected 0)\n",
			    __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m),
			    (uint64_t)VM_KERNEL_ADDRPERM(m->m_data),
			    (uint64_t)VM_KERNEL_ADDRPERM(m->m_dat), m->m_len);
			ret = EBUSY;
		} else {
			VERIFY((m->m_flags & M_EXT) || m->m_data == m->m_dat);
			m->m_flags |= M_PKTHDR;
			MBUF_INIT_PKTHDR(m);
		}
	} else {
		/* Check for scratch area overflow */
		m_redzone_verify(m);
		/* Free the aux data and tags if there are any */
		m_tag_delete_chain(m, NULL);
		m->m_flags &= ~M_PKTHDR;
	}

	return (ret);
}
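
/*
 * Illustrative call pattern (a sketch, not from the original source):
 * promote a freshly reset mbuf into a packet header mbuf.  m_reinit()
 * refuses with EBUSY if the mbuf still appears to hold data in its
 * internal buffer.
 *
 *	m->m_data = m->m_dat;
 *	m->m_len = 0;
 *	error = m_reinit(m, 1);
 */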
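/*
 * Accessors for the external buffer's private property word (MEXT_PRIV).
 * The setter updates it with an atomic compare-and-swap from the expected
 * old value 'o' to the new value 'n', so callers can detect racing
 * updates; the getter simply reads the current value.
 */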
int
m_ext_set_prop(struct mbuf *m, uint32_t o, uint32_t n)
{
	ASSERT(m->m_flags & M_EXT);
	return (atomic_test_set_32(&MEXT_PRIV(m), o, n));
}

uint32_t
m_ext_get_prop(struct mbuf *m)
{
	ASSERT(m->m_flags & M_EXT);
	return (MEXT_PRIV(m));
}

int
m_ext_paired_is_active(struct mbuf *m)
{
	return (MBUF_IS_PAIRED(m) ? (MEXT_PREF(m) > MEXT_MINREF(m)) : 1);
}
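/*
 * Reinitialize a paired mbuf and its external buffer for reuse.  With
 * EXTF_PAIRED the mbuf and buffer stay attached across frees; activation
 * re-runs MBUF_INIT()/MEXT_INIT() and raises the paired reference count
 * (MEXT_PREF) above the minimum, which is exactly what
 * m_ext_paired_is_active() tests for.
 */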
void
m_ext_paired_activate(struct mbuf *m)
{
	struct ext_ref *rfa;
	int hdr, type;
	caddr_t extbuf;
	m_ext_free_func_t extfree;
	u_int extsize;

	VERIFY(MBUF_IS_PAIRED(m));
	VERIFY(MEXT_REF(m) == MEXT_MINREF(m));
	VERIFY(MEXT_PREF(m) == MEXT_MINREF(m));

	hdr = (m->m_flags & M_PKTHDR);
	type = m->m_type;
	extbuf = m->m_ext.ext_buf;
	extfree = m_get_ext_free(m);
	extsize = m->m_ext.ext_size;
	rfa = m_get_rfa(m);

	VERIFY(extbuf != NULL && rfa != NULL);

	/*
	 * Safe to reinitialize packet header tags, since it's
	 * already taken care of at m_free() time.  Similar to
	 * what's done in m_clattach() for the cluster.  Bump
	 * up MEXT_PREF to indicate activation.
	 */
	MBUF_INIT(m, hdr, type);
	MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
	    1, 1, 2, EXTF_PAIRED, MEXT_PRIV(m), m);
}

void
m_scratch_init(struct mbuf *m)
{
	struct pkthdr *pkt = &m->m_pkthdr;

	VERIFY(m->m_flags & M_PKTHDR);

	/* See comments in <rdar://problem/14040693> */
	if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
		panic_plain("Invalid attempt to modify guarded module-private "
		    "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
		/* NOTREACHED */
	}

	bzero(&pkt->pkt_mpriv, sizeof (pkt->pkt_mpriv));
}

/*
 * This routine is reserved for mbuf_get_driver_scratch(); clients inside
 * xnu that intend to use the module-private area should directly refer
 * to the pkt_mpriv structure in the pkthdr.  They are also expected to
 * set and clear PKTF_PRIV_GUARDED, while owning the packet and prior to
 * handing it off to another module, respectively.
 */
u_int32_t
m_scratch_get(struct mbuf *m, u_int8_t **p)
{
	struct pkthdr *pkt = &m->m_pkthdr;

	VERIFY(m->m_flags & M_PKTHDR);

	/* See comments in <rdar://problem/14040693> */
	if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
		panic_plain("Invalid attempt to access guarded module-private "
		    "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
		/* NOTREACHED */
	}

	if (mcltrace) {
		mcache_audit_t *mca;

		lck_mtx_lock(mbuf_mlock);
		mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
		if (mca->mca_uflags & MB_SCVALID)
			mcl_audit_scratch(mca);
		lck_mtx_unlock(mbuf_mlock);
	}

	*p = (u_int8_t *)&pkt->pkt_mpriv;
	return (sizeof (pkt->pkt_mpriv));
}
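
/*
 * Illustrative use (sketch only; names are hypothetical): a caller
 * fetches the driver scratch area and stashes a 32-bit cookie in it.
 *
 *	u_int8_t *scratch;
 *	u_int32_t scratch_len = m_scratch_get(m, &scratch);
 *	if (scratch_len >= sizeof (cookie))
 *		bcopy(&cookie, scratch, sizeof (cookie));
 */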

static void
m_redzone_init(struct mbuf *m)
{
	VERIFY(m->m_flags & M_PKTHDR);
	/*
	 * Each mbuf has a unique red zone pattern, which is an XOR
	 * of the red zone cookie and the address of the mbuf.
	 */
	m->m_pkthdr.redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
}

static void
m_redzone_verify(struct mbuf *m)
{
	u_int32_t mb_redzone;

	VERIFY(m->m_flags & M_PKTHDR);

	mb_redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
	if (m->m_pkthdr.redzone != mb_redzone) {
		panic("mbuf %p redzone violation with value 0x%x "
		    "(instead of 0x%x, using cookie 0x%x)\n",
		    m, m->m_pkthdr.redzone, mb_redzone, mb_redzone_cookie);
		/* NOTREACHED */
	}
}
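
/*
 * Worked example (hypothetical values): with mb_redzone_cookie equal to
 * 0x5a5a5a5a and an mbuf at address 0x1000, m_redzone_init() stores
 * 0x1000 ^ 0x5a5a5a5a = 0x5a5a4a5a in m_pkthdr.redzone; any other value
 * found later trips the panic in m_redzone_verify().
 */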
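/*
 * The ext_refflags, ext_free and ext_arg fields are never stored in the
 * clear: each is XOR-ed with either a boot-time random cookie
 * (mb_obscure_extref / mb_obscure_extfree) or a per-buffer token derived
 * from one, so a disclosed mbuf does not leak usable kernel pointers and
 * a corrupted one is unlikely to yield a controllable function pointer.
 * m_get_rfa(), m_get_ext_free() and m_get_ext_arg() below reverse the
 * transformation.
 */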
__private_extern__ inline void
m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free,
    caddr_t ext_arg)
{
	VERIFY(m->m_flags & M_EXT);
	if (rfa != NULL) {
		m->m_ext.ext_refflags =
		    (struct ext_ref *)(((uintptr_t)rfa) ^ mb_obscure_extref);
		if (ext_free != NULL) {
			rfa->ext_token = ((uintptr_t)&rfa->ext_token) ^
			    mb_obscure_extfree;
			m->m_ext.ext_free = (m_ext_free_func_t)
			    (((uintptr_t)ext_free) ^ rfa->ext_token);
			if (ext_arg != NULL) {
				m->m_ext.ext_arg = (caddr_t)
				    (((uintptr_t)ext_arg) ^ rfa->ext_token);
			} else {
				m->m_ext.ext_arg = NULL;
			}
		} else {
			rfa->ext_token = 0;
			m->m_ext.ext_free = NULL;
			m->m_ext.ext_arg = NULL;
		}
	} else {
		/*
		 * If we are going to lose the cookie in ext_token by
		 * resetting the rfa, we should use the global cookie
		 * to obscure the ext_free and ext_arg pointers.
		 */
		if (ext_free != NULL) {
			m->m_ext.ext_free = (m_ext_free_func_t)
			    (((uintptr_t)ext_free) ^ mb_obscure_extfree);
			if (ext_arg != NULL) {
				m->m_ext.ext_arg = (caddr_t)
				    (((uintptr_t)ext_arg) ^
				    mb_obscure_extfree);
			} else {
				m->m_ext.ext_arg = NULL;
			}
		} else {
			m->m_ext.ext_free = NULL;
			m->m_ext.ext_arg = NULL;
		}
		m->m_ext.ext_refflags = NULL;
	}
}

__private_extern__ inline struct ext_ref *
m_get_rfa(struct mbuf *m)
{
	if (m->m_ext.ext_refflags == NULL)
		return (NULL);
	else
		return ((struct ext_ref *)(((uintptr_t)m->m_ext.ext_refflags) ^
		    mb_obscure_extref));
}

__private_extern__ inline m_ext_free_func_t
m_get_ext_free(struct mbuf *m)
{
	struct ext_ref *rfa;
	if (m->m_ext.ext_free == NULL)
		return (NULL);

	rfa = m_get_rfa(m);
	if (rfa == NULL)
		return ((m_ext_free_func_t)((uintptr_t)m->m_ext.ext_free ^
		    mb_obscure_extfree));
	else
		return ((m_ext_free_func_t)(((uintptr_t)m->m_ext.ext_free) ^
		    rfa->ext_token));
}

__private_extern__ inline caddr_t
m_get_ext_arg(struct mbuf *m)
{
	struct ext_ref *rfa;
	if (m->m_ext.ext_arg == NULL)
		return (NULL);

	rfa = m_get_rfa(m);
	if (rfa == NULL) {
		return ((caddr_t)((uintptr_t)m->m_ext.ext_arg ^
		    mb_obscure_extfree));
	} else {
		return ((caddr_t)(((uintptr_t)m->m_ext.ext_arg) ^
		    rfa->ext_token));
	}
}

/*
 * Send a report of mbuf usage if the usage is at least 6% of max limit
 * or if there has been at least a 3% increase since the last report.
 *
 * The values 6% and 3% are chosen so that we can do simple arithmetic
 * with shift operations: the actual thresholds are m_maxlimit >> 4
 * (6.25%) and m_peak >> 5 (3.125%).
 */
static boolean_t
mbuf_report_usage(mbuf_class_t cl)
{
	/* if a report is already in progress, nothing to do */
	if (mb_peak_newreport)
		return (TRUE);

	if (m_total(cl) > m_peak(cl) &&
	    m_total(cl) >= (m_maxlimit(cl) >> 4) &&
	    (m_total(cl) - m_peak(cl)) >= (m_peak(cl) >> 5))
		return (TRUE);
	return (FALSE);
}

__private_extern__ void
mbuf_report_peak_usage(void)
{
	int i = 0;
	u_int64_t uptime;
	struct nstat_sysinfo_data ns_data;
	uint32_t memreleased = 0;

	uptime = net_uptime();
	lck_mtx_lock(mbuf_mlock);

	/* Generate an initial report after 1 week of uptime */
	if (!mb_peak_firstreport &&
	    uptime > MBUF_PEAK_FIRST_REPORT_THRESHOLD) {
		mb_peak_newreport = TRUE;
		mb_peak_firstreport = TRUE;
	}

	if (!mb_peak_newreport) {
		lck_mtx_unlock(mbuf_mlock);
		return;
	}

	/*
	 * Since a report is being generated before 1 week of uptime,
	 * we do not need to force another one later.
	 */
	if (uptime < MBUF_PEAK_FIRST_REPORT_THRESHOLD)
		mb_peak_firstreport = TRUE;

	for (i = 0; i < NELEM(mbuf_table); i++) {
		m_peak(m_class(i)) = m_total(m_class(i));
		memreleased += m_release_cnt(i);
		m_release_cnt(i) = 0;
	}
	mb_peak_newreport = FALSE;
	lck_mtx_unlock(mbuf_mlock);

	bzero(&ns_data, sizeof(ns_data));
	ns_data.flags = NSTAT_SYSINFO_MBUF_STATS;
	ns_data.u.mb_stats.total_256b = m_peak(MC_MBUF);
	ns_data.u.mb_stats.total_2kb = m_peak(MC_CL);
	ns_data.u.mb_stats.total_4kb = m_peak(MC_BIGCL);
	ns_data.u.mb_stats.total_16kb = m_peak(MC_16KCL);
	ns_data.u.mb_stats.sbmb_total = total_sbmb_cnt_peak;
	ns_data.u.mb_stats.sb_atmbuflimit = sbmb_limreached;
	ns_data.u.mb_stats.draincnt = mbstat.m_drain;
	ns_data.u.mb_stats.memreleased = memreleased;
	ns_data.u.mb_stats.sbmb_floor = total_sbmb_cnt_floor;

	nstat_sysinfo_send_data(&ns_data);

	/*
	 * Reset the floor whenever we report a new peak to track the
	 * trend (increased peak usage is not a leak if mbufs get
	 * released between reports and the floor stays low).
	 */
	total_sbmb_cnt_floor = total_sbmb_cnt_peak;
}

/*
 * Called by the VM when there's memory pressure.
 */
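/*
 * Overview of the strategy below: optionally purge the mbuf caches
 * (only when drain requests arrive close together), return composite-
 * class free objects to the rudimentary slab lists, then unmap and
 * free whole unused slabs back to the system, walking the slab lists
 * backwards to reduce fragmentation.
 */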
__private_extern__ void
m_drain(void)
{
	mbuf_class_t mc;
	mcl_slab_t *sp, *sp_tmp, *nsp;
	unsigned int num, k, interval, released = 0;
	unsigned long total_mem = 0, use_mem = 0;
	boolean_t ret, purge_caches = FALSE;
	ppnum_t offset;
	mcache_obj_t *obj;
	unsigned long per;
	static uint64_t last_drain = 0;
	static unsigned char scratch[32];
	static ppnum_t scratch_pa = 0;

	if (mb_drain_maxint == 0 || mb_waiters)
		return;
	if (scratch_pa == 0) {
		bzero(scratch, sizeof(scratch));
		scratch_pa = pmap_find_phys(kernel_pmap, (addr64_t)scratch);
		VERIFY(scratch_pa);
	} else if (mclverify) {
		/*
		 * Panic if a driver wrote to our scratch memory.
		 */
		for (k = 0; k < sizeof(scratch); k++)
			if (scratch[k])
				panic("suspect DMA to freed address");
	}
	/*
	 * Don't free memory too often as that could cause excessive
	 * waiting times for mbufs.  Purge the caches only if this drain
	 * follows the previous one within five times the minimum drain
	 * interval (mb_drain_maxint).
	 */
	lck_mtx_lock(mbuf_mlock);
	if (last_drain == 0) {
		last_drain = net_uptime();
		lck_mtx_unlock(mbuf_mlock);
		return;
	}
	interval = net_uptime() - last_drain;
	if (interval <= mb_drain_maxint) {
		lck_mtx_unlock(mbuf_mlock);
		return;
	}
	if (interval <= mb_drain_maxint * 5)
		purge_caches = TRUE;
	last_drain = net_uptime();
	/*
	 * Don't free any memory if we're using 60% or more.
	 */
	for (mc = 0; mc < NELEM(mbuf_table); mc++) {
		total_mem += m_total(mc) * m_maxsize(mc);
		use_mem += m_active(mc) * m_maxsize(mc);
	}
	per = (use_mem * 100) / total_mem;
	if (per >= 60) {
		lck_mtx_unlock(mbuf_mlock);
		return;
	}
	/*
	 * Purge all the caches.  This effectively disables
	 * caching for a few seconds, but the mbuf worker thread will
	 * re-enable them again.
	 */
	if (purge_caches == TRUE)
		for (mc = 0; mc < NELEM(mbuf_table); mc++) {
			if (m_total(mc) < m_avgtotal(mc))
				continue;
			lck_mtx_unlock(mbuf_mlock);
			ret = mcache_purge_cache(m_cache(mc), FALSE);
			lck_mtx_lock(mbuf_mlock);
			if (ret == TRUE)
				m_purge_cnt(mc)++;
		}
	/*
	 * Move the objects from the composite class freelist to
	 * the rudimentary slabs list, but keep at least 10% of the average
	 * total in the freelist.
	 */
	for (mc = 0; mc < NELEM(mbuf_table); mc++) {
		while (m_cobjlist(mc) &&
		    m_total(mc) < m_avgtotal(mc) &&
		    m_infree(mc) > 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
			obj = m_cobjlist(mc);
			m_cobjlist(mc) = obj->obj_next;
			obj->obj_next = NULL;
			num = cslab_free(mc, obj, 1);
			VERIFY(num == 1);
			m_free_cnt(mc)++;
			m_infree(mc)--;
			/* cslab_free() handles m_total */
		}
	}
	/*
	 * Free the buffers present in the slab list up to 10% of the total
	 * average per class.
	 *
	 * We walk the list backwards in an attempt to reduce fragmentation.
	 */
	for (mc = NELEM(mbuf_table) - 1; (int)mc >= 0; mc--) {
		TAILQ_FOREACH_SAFE(sp, &m_slablist(mc), sl_link, sp_tmp) {
			/*
			 * Process only unused slabs occupying memory.
			 */
			if (sp->sl_refcnt != 0 || sp->sl_len == 0 ||
			    sp->sl_base == NULL)
				continue;
			if (m_total(mc) < m_avgtotal(mc) ||
			    m_infree(mc) < 0.1 * m_avgtotal(mc) +
			    m_minlimit(mc))
				break;
			slab_remove(sp, mc);
			switch (mc) {
			case MC_MBUF:
				m_infree(mc) -= NMBPG;
				m_total(mc) -= NMBPG;
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, NMBPG);
				break;
			case MC_CL:
				m_infree(mc) -= NCLPG;
				m_total(mc) -= NCLPG;
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, NMBPG);
				break;
			case MC_BIGCL:
			{
				m_infree(mc) -= NBCLPG;
				m_total(mc) -= NBCLPG;
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, NMBPG);
				break;
			}
			case MC_16KCL:
				m_infree(mc)--;
				m_total(mc)--;
				for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
					nsp = nsp->sl_next;
					VERIFY(nsp->sl_refcnt == 0 &&
					    nsp->sl_base != NULL &&
					    nsp->sl_len == 0);
					slab_init(nsp, 0, 0, NULL, NULL, 0, 0,
					    0);
					nsp->sl_flags = 0;
				}
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, 1);
				break;
			default:
				/*
				 * The composite classes have their own
				 * freelist (m_cobjlist), so we only
				 * process rudimentary classes here.
				 */
				VERIFY(0);
			}
			m_release_cnt(mc) += m_size(mc);
			released += m_size(mc);
			VERIFY(sp->sl_base != NULL &&
			    sp->sl_len >= PAGE_SIZE);
			offset = MTOPG(sp->sl_base);
			/*
			 * Make sure the IOMapper points to a valid, but
			 * bogus, address.  This should prevent further DMA
			 * accesses to freed memory.
			 */
			IOMapperInsertPage(mcl_paddr_base, offset, scratch_pa);
			mcl_paddr[offset] = 0;
			kmem_free(mb_map, (vm_offset_t)sp->sl_base,
			    sp->sl_len);
			slab_init(sp, 0, 0, NULL, NULL, 0, 0, 0);
			sp->sl_flags = 0;
		}
	}
	mbstat.m_drain++;
	mbstat.m_bigclusters = m_total(MC_BIGCL);
	mbstat.m_clusters = m_total(MC_CL);
	mbstat.m_mbufs = m_total(MC_MBUF);
	mbuf_stat_sync();
	mbuf_mtypes_sync(TRUE);
	lck_mtx_unlock(mbuf_mlock);
}

static int
m_drain_force_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int val = 0, err;

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL)
		return (err);
	if (val)
		m_drain();

	return (err);
}

SYSCTL_DECL(_kern_ipc);
SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mbstat_sysctl, "S,mbstat", "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_stat,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mb_stat_sysctl, "S,mb_stat", "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_top_trace,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mleak_top_trace_sysctl, "S,mb_top_trace", "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_table,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mleak_table_sysctl, "S,mleak_table", "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mleak_sample_factor,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mleak_table.mleak_sample_factor, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_normalized,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mb_normalized, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_watchdog,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mb_watchdog, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_drain_force,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    m_drain_force_sysctl, "I",
    "Forces the mbuf garbage collection to run");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_drain_maxint,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mb_drain_maxint, 0,
    "Minimum time interval between garbage collection runs");