Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
39037602 | 2 | * Copyright (c) 1998-2016 Apple Inc. All rights reserved. |
5d5c5d0d | 3 | * |
2d21ac55 A |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
1c79356b A |
27 | */ |
28 | /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ | |
29 | /* | |
30 | * Copyright (c) 1982, 1986, 1988, 1991, 1993 | |
31 | * The Regents of the University of California. All rights reserved. | |
32 | * | |
33 | * Redistribution and use in source and binary forms, with or without | |
34 | * modification, are permitted provided that the following conditions | |
35 | * are met: | |
36 | * 1. Redistributions of source code must retain the above copyright | |
37 | * notice, this list of conditions and the following disclaimer. | |
38 | * 2. Redistributions in binary form must reproduce the above copyright | |
39 | * notice, this list of conditions and the following disclaimer in the | |
40 | * documentation and/or other materials provided with the distribution. | |
41 | * 3. All advertising materials mentioning features or use of this software | |
42 | * must display the following acknowledgement: | |
43 | * This product includes software developed by the University of | |
44 | * California, Berkeley and its contributors. | |
45 | * 4. Neither the name of the University nor the names of its contributors | |
46 | * may be used to endorse or promote products derived from this software | |
47 | * without specific prior written permission. | |
48 | * | |
49 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
50 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
51 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
52 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
53 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
54 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
55 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
56 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
57 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
58 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
59 | * SUCH DAMAGE. | |
60 | * | |
61 | * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94 | |
62 | */ | |
2d21ac55 A |
63 | /* |
64 | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce | |
65 | * support for mandatory and extensible security protections. This notice | |
66 | * is included in support of clause 2.2 (b) of the Apple Public License, | |
67 | * Version 2.0. | |
1c79356b A |
68 | */ |
69 | ||
70 | #include <sys/param.h> | |
71 | #include <sys/systm.h> | |
72 | #include <sys/malloc.h> | |
73 | #include <sys/mbuf.h> | |
74 | #include <sys/kernel.h> | |
91447636 | 75 | #include <sys/sysctl.h> |
1c79356b A |
76 | #include <sys/syslog.h> |
77 | #include <sys/protosw.h> | |
78 | #include <sys/domain.h> | |
2d21ac55 | 79 | #include <sys/queue.h> |
b0d623f7 | 80 | #include <sys/proc.h> |
1c79356b | 81 | |
39236c6e A |
82 | #include <dev/random/randomdev.h> |
83 | ||
9bccf70c | 84 | #include <kern/kern_types.h> |
2d21ac55 A |
85 | #include <kern/simple_lock.h> |
86 | #include <kern/queue.h> | |
9bccf70c | 87 | #include <kern/sched_prim.h> |
39037602 | 88 | #include <kern/backtrace.h> |
2d21ac55 | 89 | #include <kern/cpu_number.h> |
6d2010ae | 90 | #include <kern/zalloc.h> |
2d21ac55 A |
91 | |
92 | #include <libkern/OSAtomic.h> | |
39236c6e | 93 | #include <libkern/OSDebug.h> |
2d21ac55 | 94 | #include <libkern/libkern.h> |
9bccf70c | 95 | |
55e303ae A |
96 | #include <IOKit/IOMapper.h> |
97 | ||
2d21ac55 A |
98 | #include <machine/limits.h> |
99 | #include <machine/machine_routines.h> | |
55e303ae | 100 | |
2d21ac55 A |
101 | #if CONFIG_MACF_NET |
102 | #include <security/mac_framework.h> | |
103 | #endif /* CONFIG_MACF_NET */ |
104 | ||
105 | #include <sys/mcache.h> | |
fe8ab488 | 106 | #include <net/ntstat.h> |
1c79356b | 107 | |
2d21ac55 A |
108 | /* |
109 | * MBUF IMPLEMENTATION NOTES. | |
110 | * | |
111 | * There is a total of 5 per-CPU caches: | |
112 | * | |
113 | * MC_MBUF: | |
114 | * This is a cache of rudimentary objects of MSIZE in size; each | |
115 | * object represents an mbuf structure. This cache preserves only | |
116 | * the m_type field of the mbuf during its transactions. | |
117 | * | |
118 | * MC_CL: | |
119 | * This is a cache of rudimentary objects of MCLBYTES in size; each | |
120 | * object represents a mcluster structure. This cache does not | |
121 | * preserve the contents of the objects during its transactions. | |
122 | * | |
123 | * MC_BIGCL: | |
6d2010ae | 124 | * This is a cache of rudimentary objects of MBIGCLBYTES in size; each |
2d21ac55 A |
125 | * object represents a mbigcluster structure. This cache does not |
126 | * preserve the contents of the objects during its transactions. |
127 | * | |
128 | * MC_MBUF_CL: | |
129 | * This is a cache of mbufs each having a cluster attached to it. | |
130 | * It is backed by MC_MBUF and MC_CL rudimentary caches. Several | |
131 | * fields of the mbuf related to the external cluster are preserved | |
132 | * during transactions. | |
133 | * | |
134 | * MC_MBUF_BIGCL: | |
135 | * This is a cache of mbufs each having a big cluster attached to it. | |
136 | * It is backed by MC_MBUF and MC_BIGCL rudimentary caches. Several | |
137 | * fields of the mbuf related to the external cluster are preserved | |
138 | * during transactions. | |
139 | * | |
140 | * OBJECT ALLOCATION: | |
141 | * | |
142 | * Allocation requests are handled first at the per-CPU (mcache) layer | |
143 | * before falling back to the slab layer. Performance is optimal when | |
144 | * the request is satisfied at the CPU layer because global data/lock | |
145 | * never gets accessed. When the slab layer is entered for allocation, | |
146 | * the slab freelist will be checked first for available objects before | |
147 | * the VM backing store is invoked. Slab layer operations are serialized | |
148 | * for all of the caches as the mbuf global lock is held most of the time. | |
149 | * Allocation paths are different depending on the class of objects: | |
150 | * | |
151 | * a. Rudimentary object: | |
152 | * | |
153 | * { m_get_common(), m_clattach(), m_mclget(), | |
154 | * m_mclalloc(), m_bigalloc(), m_copym_with_hdrs(), | |
155 | * composite object allocation } | |
156 | * | ^ | |
157 | * | | | |
158 | * | +-----------------------+ | |
159 | * v | | |
160 | * mcache_alloc/mcache_alloc_ext() mbuf_slab_audit() | |
161 | * | ^ | |
162 | * v | | |
163 | * [CPU cache] -------> (found?) -------+ | |
164 | * | | | |
165 | * v | | |
166 | * mbuf_slab_alloc() | | |
167 | * | | | |
168 | * v | | |
169 | * +---------> [freelist] -------> (found?) -------+ | |
170 | * | | | |
171 | * | v | |
172 | * | m_clalloc() | |
173 | * | | | |
174 | * | v | |
175 | * +---<<---- kmem_mb_alloc() | |
176 | * | |
177 | * b. Composite object: | |
178 | * | |
179 | * { m_getpackets_internal(), m_allocpacket_internal() } | |
180 | * | ^ | |
181 | * | | | |
182 | * | +------ (done) ---------+ | |
183 | * v | | |
184 | * mcache_alloc/mcache_alloc_ext() mbuf_cslab_audit() | |
185 | * | ^ | |
186 | * v | | |
187 | * [CPU cache] -------> (found?) -------+ | |
188 | * | | | |
189 | * v | | |
190 | * mbuf_cslab_alloc() | | |
191 | * | | | |
192 | * v | | |
193 | * [freelist] -------> (found?) -------+ | |
194 | * | | | |
195 | * v | | |
196 | * (rudimentary object) | | |
197 | * mcache_alloc/mcache_alloc_ext() ------>>-----+ | |
198 | * | |
199 | * Auditing notes: If auditing is enabled, buffers will be subjected to | |
200 | * integrity checks by the audit routine. This is done by verifying their | |
201 | * contents against the DEADBEEF (free) pattern before returning them to the caller. |
202 | * As part of this step, the routine will also record the transaction and | |
203 | * pattern-fill the buffers with BADDCAFE (uninitialized) pattern. It will | |
204 | * also restore any constructed data structure fields if necessary. | |
205 | * | |
206 | * OBJECT DEALLOCATION: | |
207 | * | |
208 | * Freeing an object simply involves placing it into the CPU cache; this | |
209 | * pollutes the cache to benefit subsequent allocations. The slab layer | |
210 | * will only be entered if the object is to be purged out of the cache. | |
211 | * During normal operations, this happens only when the CPU layer resizes | |
212 | * its bucket while it's adjusting to the allocation load. Deallocation | |
213 | * paths are different depending on the class of objects: | |
214 | * | |
215 | * a. Rudimentary object: | |
216 | * | |
217 | * { m_free(), m_freem_list(), composite object deallocation } | |
218 | * | ^ | |
219 | * | | | |
220 | * | +------ (done) ---------+ | |
221 | * v | | |
222 | * mcache_free/mcache_free_ext() | | |
223 | * | | | |
224 | * v | | |
225 | * mbuf_slab_audit() | | |
226 | * | | | |
227 | * v | | |
228 | * [CPU cache] ---> (not purging?) -----+ | |
229 | * | | | |
230 | * v | | |
231 | * mbuf_slab_free() | | |
232 | * | | | |
233 | * v | | |
234 | * [freelist] ----------->>------------+ | |
3e170ce0 | 235 | * (objects get purged to VM only on demand) |
2d21ac55 A |
236 | * |
237 | * b. Composite object: | |
238 | * | |
239 | * { m_free(), m_freem_list() } | |
240 | * | ^ | |
241 | * | | | |
242 | * | +------ (done) ---------+ | |
243 | * v | | |
244 | * mcache_free/mcache_free_ext() | | |
245 | * | | | |
246 | * v | | |
247 | * mbuf_cslab_audit() | | |
248 | * | | | |
249 | * v | | |
250 | * [CPU cache] ---> (not purging?) -----+ | |
251 | * | | | |
252 | * v | | |
253 | * mbuf_cslab_free() | | |
254 | * | | | |
255 | * v | | |
256 | * [freelist] ---> (not purging?) -----+ | |
257 | * | | | |
258 | * v | | |
259 | * (rudimentary object) | | |
260 | * mcache_free/mcache_free_ext() ------->>------+ | |
261 | * | |
262 | * Auditing notes: If auditing is enabled, the audit routine will save | |
263 | * any constructed data structure fields (if necessary) before filling the | |
264 | * contents of the buffers with DEADBEEF (free) pattern and recording the | |
265 | * transaction. Buffers that are freed (whether at CPU or slab layer) are | |
266 | * expected to contain the free pattern. | |
267 | * | |
268 | * DEBUGGING: | |
269 | * | |
270 | * Debugging can be enabled by adding "mbuf_debug=0x3" to boot-args; this | |
271 | * translates to the mcache flags (MCF_VERIFY | MCF_AUDIT). Additionally, | |
272 | * the CPU layer cache can be disabled by setting the MCF_NOCPUCACHE flag, | |
6d2010ae A |
273 | * i.e. modify the boot argument parameter to "mbuf_debug=0x13". Leak |
274 | * detection may also be disabled by setting the MCF_NOLEAKLOG flag, e.g. | |
275 | * "mbuf_debug=0x113". Note that debugging consumes more CPU and memory. | |
2d21ac55 A |
276 | * |
277 | * Each object is associated with exactly one mcache_audit_t structure that | |
278 | * contains the information related to its last buffer transaction. Given | |
279 | * an address of an object, the audit structure can be retrieved by finding | |
280 | * the position of the object relative to the base address of the cluster: |
281 | * | |
282 | * +------------+ +=============+ | |
283 | * | mbuf addr | | mclaudit[i] | | |
284 | * +------------+ +=============+ | |
285 | * | | cl_audit[0] | | |
6d2010ae | 286 | * i = MTOPG(addr) +-------------+
2d21ac55 | 287 | * | +-----> | cl_audit[1] | -----> mcache_audit_t |
6d2010ae | 288 | * b = PGTOM(i) | +-------------+
2d21ac55 A |
289 | * | | | ... | |
290 | * x = MCLIDX(b, addr) | +-------------+ | |
291 | * | | | cl_audit[7] | | |
292 | * +-----------------+ +-------------+ | |
293 | * (e.g. x == 1) | |
294 | * | |
295 | * The mclaudit[] array is allocated at initialization time, but its contents | |
6d2010ae | 296 | * get populated when the corresponding cluster is created. Because a page |
3e170ce0 | 297 | * can be turned into NMBPG number of mbufs, we preserve enough space for the |
6d2010ae | 298 | * mbufs so that there is a 1-to-1 mapping between them. A page that never |
2d21ac55 | 299 | * gets (or has not yet) turned into mbufs will use only cl_audit[0] with the |
6d2010ae A |
300 | * remaining entries unused. For a 16KB cluster, only one entry from the first
301 | * page is allocated and used for the entire object. | |
2d21ac55 | 302 | */ |
91447636 | 303 | |
2d21ac55 A |
304 | /* TODO: should be in header file */ |
305 | /* kernel translator */ |
b0d623f7 | 306 | extern vm_offset_t kmem_mb_alloc(vm_map_t, int, int); |
2d21ac55 | 307 | extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); |
1c79356b | 308 | extern vm_map_t mb_map; /* special map */ |
2d21ac55 A |
309 | |
310 | /* Global lock */ | |
316670eb A |
311 | decl_lck_mtx_data(static, mbuf_mlock_data); |
312 | static lck_mtx_t *mbuf_mlock = &mbuf_mlock_data; | |
2d21ac55 A |
313 | static lck_attr_t *mbuf_mlock_attr; |
314 | static lck_grp_t *mbuf_mlock_grp; | |
315 | static lck_grp_attr_t *mbuf_mlock_grp_attr; | |
316 | ||
317 | /* Back-end (common) layer */ | |
39037602 | 318 | static boolean_t mbuf_worker_needs_wakeup; /* wait channel for mbuf worker */ |
2d21ac55 A |
319 | static int mbuf_worker_ready; /* worker thread is runnable */ |
320 | static int mbuf_expand_mcl; /* number of cluster creation requests */ |
321 | static int mbuf_expand_big; /* number of big cluster creation requests */ | |
6d2010ae | 322 | static int mbuf_expand_16k; /* number of 16KB cluster creation requests */ |
2d21ac55 | 323 | static int ncpu; /* number of CPUs */ |
b0d623f7 A |
324 | static ppnum_t *mcl_paddr; /* Array of cluster physical addresses */ |
325 | static ppnum_t mcl_pages; /* Size of array (# physical pages) */ | |
55e303ae | 326 | static ppnum_t mcl_paddr_base; /* Handle returned by IOMapper::iovmAlloc() */ |
2d21ac55 A |
327 | static mcache_t *ref_cache; /* Cache of cluster reference & flags */ |
328 | static mcache_t *mcl_audit_con_cache; /* Audit contents cache */ | |
329 | static unsigned int mbuf_debug; /* patchable mbuf mcache flags */ | |
330 | static unsigned int mb_normalized; /* number of packets "normalized" */ | |
b0d623f7 A |
331 | |
332 | #define MB_GROWTH_AGGRESSIVE 1 /* Threshold: 1/2 of total */ | |
6d2010ae | 333 | #define MB_GROWTH_NORMAL 2 /* Threshold: 3/4 of total */ |
2d21ac55 A |
334 | |
335 | typedef enum { | |
336 | MC_MBUF = 0, /* Regular mbuf */ | |
337 | MC_CL, /* Cluster */ | |
6d2010ae A |
338 | MC_BIGCL, /* Large (4KB) cluster */ |
339 | MC_16KCL, /* Jumbo (16KB) cluster */ | |
2d21ac55 | 340 | MC_MBUF_CL, /* mbuf + cluster */ |
6d2010ae A |
341 | MC_MBUF_BIGCL, /* mbuf + large (4KB) cluster */ |
342 | MC_MBUF_16KCL /* mbuf + jumbo (16KB) cluster */ | |
2d21ac55 A |
343 | } mbuf_class_t; |
344 | ||
345 | #define MBUF_CLASS_MIN MC_MBUF | |
346 | #define MBUF_CLASS_MAX MC_MBUF_16KCL | |
347 | #define MBUF_CLASS_LAST MC_16KCL | |
348 | #define MBUF_CLASS_VALID(c) \ | |
349 | ((int)(c) >= MBUF_CLASS_MIN && (int)(c) <= MBUF_CLASS_MAX) | |
350 | #define MBUF_CLASS_COMPOSITE(c) \ | |
351 | ((int)(c) > MBUF_CLASS_LAST) | |
91447636 | 352 | |
9bccf70c | 353 | |
2d21ac55 A |
354 | /* |
355 | * mbuf specific mcache allocation request flags. | |
356 | */ | |
357 | #define MCR_COMP MCR_USR1 /* for MC_MBUF_{CL,BIGCL,16KCL} caches */ | |
9bccf70c | 358 | |
2d21ac55 A |
359 | /* |
360 | * Per-cluster slab structure. | |
361 | * | |
362 | * A slab is a cluster control structure that contains one or more object | |
363 | * chunks; the available chunks are chained in the slab's freelist (sl_head). | |
364 | * Each time a chunk is taken out of the slab, the slab's reference count | |
365 | * gets incremented. When all chunks have been taken out, the empty slab | |
366 | * gets removed (SLF_DETACHED) from the class's slab list. A chunk that is | |
367 | * returned to a slab causes the slab's reference count to be decremented; | |
368 | * it also causes the slab to be reinserted into the class's slab list, if |
369 | * it is not already there. |
370 | * | |
371 | * Compartmentalizing the object chunks into slabs allows us to easily |
372 | * merge one or more slabs together when the adjacent slabs are idle, as | |
373 | * well as to convert or move a slab from one class to another; e.g. the | |
374 | * mbuf cluster slab can be converted to a regular cluster slab when all | |
375 | * mbufs in the slab have been freed. | |
376 | * | |
377 | * A slab may also span multiple clusters for chunks larger than |
378 | * a cluster's size. In this case, only the slab of the first cluster is | |
379 | * used. The rest of the slabs are marked with SLF_PARTIAL to indicate | |
380 | * that they are part of the larger slab. | |
6d2010ae A |
381 | * |
382 | * Each slab controls a page of memory. | |
2d21ac55 A |
383 | */ |
384 | typedef struct mcl_slab { | |
385 | struct mcl_slab *sl_next; /* neighboring slab */ | |
386 | u_int8_t sl_class; /* controlling mbuf class */ | |
387 | int8_t sl_refcnt; /* outstanding allocations */ | |
388 | int8_t sl_chunks; /* chunks (bufs) in this slab */ | |
389 | u_int16_t sl_flags; /* slab flags (see below) */ | |
390 | u_int16_t sl_len; /* slab length */ | |
391 | void *sl_base; /* base of allocated memory */ | |
392 | void *sl_head; /* first free buffer */ | |
393 | TAILQ_ENTRY(mcl_slab) sl_link; /* next/prev slab on freelist */ | |
394 | } mcl_slab_t; | |
395 | ||
396 | #define SLF_MAPPED 0x0001 /* backed by a mapped page */ | |
397 | #define SLF_PARTIAL 0x0002 /* part of another slab */ | |
398 | #define SLF_DETACHED 0x0004 /* not in slab freelist */ | |
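/*
 * Editorial sketch, not part of the original source: popping one chunk
 * off a slab's freelist as described in the comment above.  The real
 * logic lives in slab_alloc(); this simplified version assumes the slab
 * still has a free chunk and skips the class-specific bookkeeping.
 * The function name is hypothetical.
 */
static inline mcache_obj_t *
slab_pop_sketch(mcl_slab_t *sp, mbuf_class_t class)
{
	mcache_obj_t *buf = sp->sl_head;	/* first free chunk */

	sp->sl_head = buf->obj_next;		/* advance the freelist */
	sp->sl_refcnt++;			/* one more outstanding chunk */
	if (sp->sl_head == NULL)		/* last chunk taken */
		slab_remove(sp, class);		/* detach from class list */
	return (buf);
}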
1c79356b | 399 | |
2d21ac55 A |
400 | /* |
401 | * The array of slabs is broken into groups of arrays per 1MB of kernel |
402 | * memory to reduce the footprint. Each group is allocated on demand | |
403 | * whenever a new piece of memory mapped in from the VM crosses the 1MB | |
404 | * boundary. | |
405 | */ | |
3e170ce0 | 406 | #define NSLABSPMB ((1 << MBSHIFT) >> PAGE_SHIFT) |
91447636 | 407 | |
2d21ac55 | 408 | typedef struct mcl_slabg { |
3e170ce0 | 409 | mcl_slab_t *slg_slab; /* group of slabs */ |
2d21ac55 | 410 | } mcl_slabg_t; |
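/*
 * Editorial sketch, not part of the original source: given a page index
 * `ix', the controlling slab lives in group (ix / NSLABSPMB) at offset
 * (ix % NSLABSPMB), which is effectively the lookup slab_get() performs
 * with the structures above.  slabstbl[] is declared further below; the
 * function name is hypothetical.
 */
static inline mcl_slab_t *
slab_get_sketch(unsigned int ix)
{
	return (&slabstbl[ix / NSLABSPMB]->slg_slab[ix % NSLABSPMB]);
}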
1c79356b | 411 | |
6d2010ae A |
412 | /* |
413 | * Number of slabs needed to control a 16KB cluster object. | |
414 | */ | |
3e170ce0 | 415 | #define NSLABSP16KB (M16KCLBYTES >> PAGE_SHIFT) |
6d2010ae | 416 | |
2d21ac55 A |
417 | /* |
418 | * Per-cluster audit structure. | |
419 | */ | |
420 | typedef struct { | |
3e170ce0 | 421 | mcache_audit_t **cl_audit; /* array of audits */ |
2d21ac55 | 422 | } mcl_audit_t; |
91447636 | 423 | |
39236c6e A |
424 | typedef struct { |
425 | struct thread *msa_thread; /* thread doing transaction */ | |
426 | struct thread *msa_pthread; /* previous transaction thread */ | |
427 | uint32_t msa_tstamp; /* transaction timestamp (ms) */ | |
428 | uint32_t msa_ptstamp; /* prev transaction timestamp (ms) */ | |
429 | uint16_t msa_depth; /* pc stack depth */ | |
430 | uint16_t msa_pdepth; /* previous transaction pc stack */ | |
431 | void *msa_stack[MCACHE_STACK_DEPTH]; | |
432 | void *msa_pstack[MCACHE_STACK_DEPTH]; | |
433 | } mcl_scratch_audit_t; | |
434 | ||
435 | typedef struct { | |
436 | /* | |
437 | * Size of data from the beginning of an mbuf that covers m_hdr, | |
438 | * pkthdr and m_ext structures. If auditing is enabled, we allocate | |
439 | * a shadow mbuf structure of this size inside each audit structure, | |
440 | * and the contents of the real mbuf get copied into it when the mbuf |
441 | * is freed. This allows us to pattern-fill the mbuf for integrity | |
442 | * check, and to preserve any constructed mbuf fields (e.g. mbuf + | |
443 | * cluster cache case). Note that we don't save the contents of | |
444 | * clusters when they are freed; we simply pattern-fill them. | |
445 | */ | |
446 | u_int8_t sc_mbuf[(MSIZE - _MHLEN) + sizeof (_m_ext_t)]; | |
447 | mcl_scratch_audit_t sc_scratch __attribute__((aligned(8))); | |
448 | } mcl_saved_contents_t; | |
449 | ||
450 | #define AUDIT_CONTENTS_SIZE (sizeof (mcl_saved_contents_t)) | |
451 | ||
452 | #define MCA_SAVED_MBUF_PTR(_mca) \ | |
453 | ((struct mbuf *)(void *)((mcl_saved_contents_t *) \ | |
454 | (_mca)->mca_contents)->sc_mbuf) | |
455 | #define MCA_SAVED_MBUF_SIZE \ | |
456 | (sizeof (((mcl_saved_contents_t *)0)->sc_mbuf)) | |
457 | #define MCA_SAVED_SCRATCH_PTR(_mca) \ | |
458 | (&((mcl_saved_contents_t *)(_mca)->mca_contents)->sc_scratch) | |
fa4905b1 | 459 | |
2d21ac55 A |
460 | /* |
461 | * mbuf specific mcache audit flags | |
462 | */ | |
463 | #define MB_INUSE 0x01 /* object has not been returned to slab */ | |
464 | #define MB_COMP_INUSE 0x02 /* object has not been returned to cslab */ | |
465 | #define MB_SCVALID 0x04 /* object has valid saved contents */ | |
fa4905b1 | 466 | |
2d21ac55 A |
467 | /* |
468 | * Each of the following two arrays hold up to nmbclusters elements. | |
469 | */ | |
470 | static mcl_audit_t *mclaudit; /* array of cluster audit information */ | |
6d2010ae | 471 | static unsigned int maxclaudit; /* max # of entries in audit table */ |
2d21ac55 A |
472 | static mcl_slabg_t **slabstbl; /* cluster slabs table */ |
473 | static unsigned int maxslabgrp; /* max # of entries in slabs table */ | |
474 | static unsigned int slabgrp; /* # of entries in slabs table */ | |
475 | ||
476 | /* Globals */ | |
477 | int nclusters; /* # of clusters for non-jumbo (legacy) sizes */ | |
478 | int njcl; /* # of clusters for jumbo sizes */ | |
479 | int njclbytes; /* size of a jumbo cluster */ | |
3e170ce0 A |
480 | unsigned char *mbutl; /* first mapped cluster address */ |
481 | unsigned char *embutl; /* ending virtual address of mclusters */ | |
316670eb A |
482 | int _max_linkhdr; /* largest link-level header */ |
483 | int _max_protohdr; /* largest protocol header */ | |
2d21ac55 A |
484 | int max_hdr; /* largest link+protocol header */ |
485 | int max_datalen; /* MHLEN - max_hdr */ | |
486 | ||
6d2010ae A |
487 | static boolean_t mclverify; /* debug: pattern-checking */ |
488 | static boolean_t mcltrace; /* debug: stack tracing */ | |
489 | static boolean_t mclfindleak; /* debug: leak detection */ | |
316670eb | 490 | static boolean_t mclexpleak; /* debug: expose leak info to user space */ |
6d2010ae | 491 | |
39236c6e A |
492 | static struct timeval mb_start; /* beginning of time */ |
493 | ||
6d2010ae A |
494 | /* mbuf leak detection variables */ |
495 | static struct mleak_table mleak_table; | |
496 | static mleak_stat_t *mleak_stat; | |
497 | ||
498 | #define MLEAK_STAT_SIZE(n) \ | |
499 | ((size_t)(&((mleak_stat_t *)0)->ml_trace[n])) | |
500 | ||
501 | struct mallocation { | |
502 | mcache_obj_t *element; /* the alloc'ed element, NULL if unused */ | |
503 | u_int32_t trace_index; /* mtrace index for corresponding backtrace */ | |
504 | u_int32_t count; /* How many objects were requested */ | |
505 | u_int64_t hitcount; /* for determining hash effectiveness */ | |
506 | }; | |
507 | ||
508 | struct mtrace { | |
509 | u_int64_t collisions; | |
510 | u_int64_t hitcount; | |
511 | u_int64_t allocs; | |
512 | u_int64_t depth; | |
513 | uintptr_t addr[MLEAK_STACK_DEPTH]; | |
514 | }; | |
515 | ||
516 | /* Size must be a power of two for the zhash to be able to just mask off bits */ | |
517 | #define MLEAK_ALLOCATION_MAP_NUM 512 | |
518 | #define MLEAK_TRACE_MAP_NUM 256 | |
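/*
 * Editorial sketch, not part of the original source: with power-of-two
 * map sizes as noted above, a bucket index is just a hash with the high
 * bits masked off; no modulo is needed.  The mixing constant here is a
 * hypothetical Knuth-style multiplier, not the hash the actual
 * leak-logging code uses, and the function name is hypothetical.
 */
static inline u_int32_t
mleak_bucket_sketch(uintptr_t addr)
{
	u_int32_t h = (u_int32_t)((addr >> 4) * 2654435761U); /* mix bits */

	return (h & (MLEAK_ALLOCATION_MAP_NUM - 1)); /* mask == modulo */
}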
519 | ||
520 | /* | |
521 | * Sample factor for how often to record a trace. This can be overridden |
522 | * via the boot-arg mleak_sample_factor. |
523 | */ | |
524 | #define MLEAK_SAMPLE_FACTOR 500 | |
525 | ||
526 | /* | |
527 | * Number of top leakers recorded. | |
528 | */ | |
529 | #define MLEAK_NUM_TRACES 5 | |
530 | ||
316670eb A |
531 | #define MB_LEAK_SPACING_64 " " |
532 | #define MB_LEAK_SPACING_32 " " | |
533 | ||
534 | ||
535 | #define MB_LEAK_HDR_32 "\n\ | |
536 | trace [1] trace [2] trace [3] trace [4] trace [5] \n\ | |
537 | ---------- ---------- ---------- ---------- ---------- \n\ | |
538 | " | |
539 | ||
540 | #define MB_LEAK_HDR_64 "\n\ | |
541 | trace [1] trace [2] trace [3] \ | |
542 | trace [4] trace [5] \n\ | |
543 | ------------------ ------------------ ------------------ \ | |
544 | ------------------ ------------------ \n\ | |
545 | " | |
546 | ||
6d2010ae A |
547 | static uint32_t mleak_alloc_buckets = MLEAK_ALLOCATION_MAP_NUM; |
548 | static uint32_t mleak_trace_buckets = MLEAK_TRACE_MAP_NUM; | |
549 | ||
550 | /* Hashmaps of allocations and their corresponding traces */ | |
551 | static struct mallocation *mleak_allocations; | |
552 | static struct mtrace *mleak_traces; | |
553 | static struct mtrace *mleak_top_trace[MLEAK_NUM_TRACES]; | |
554 | ||
555 | /* Lock to protect mleak tables from concurrent modification */ | |
316670eb A |
556 | decl_lck_mtx_data(static, mleak_lock_data); |
557 | static lck_mtx_t *mleak_lock = &mleak_lock_data; | |
6d2010ae A |
558 | static lck_attr_t *mleak_lock_attr; |
559 | static lck_grp_t *mleak_lock_grp; | |
560 | static lck_grp_attr_t *mleak_lock_grp_attr; | |
561 | ||
39037602 A |
562 | /* Lock to protect the completion callback table */ |
563 | static lck_grp_attr_t *mbuf_tx_compl_tbl_lck_grp_attr = NULL; | |
564 | static lck_attr_t *mbuf_tx_compl_tbl_lck_attr = NULL; | |
565 | static lck_grp_t *mbuf_tx_compl_tbl_lck_grp = NULL; | |
566 | decl_lck_rw_data(, mbuf_tx_compl_tbl_lck_rw_data); | |
567 | lck_rw_t *mbuf_tx_compl_tbl_lock = &mbuf_tx_compl_tbl_lck_rw_data; | |
568 | ||
b0d623f7 A |
569 | extern u_int32_t high_sb_max; |
570 | ||
2d21ac55 A |
571 | /* The minimum number of objects that are allocated, to start. */ |
572 | #define MINCL 32 | |
573 | #define MINBIGCL (MINCL >> 1) | |
574 | #define MIN16KCL (MINCL >> 2) | |
575 | ||
576 | /* Low watermarks (only map in pages once free counts drop below these) */ |
2d21ac55 A |
577 | #define MBIGCL_LOWAT MINBIGCL |
578 | #define M16KCL_LOWAT MIN16KCL | |
579 | ||
580 | typedef struct { | |
581 | mbuf_class_t mtbl_class; /* class type */ | |
582 | mcache_t *mtbl_cache; /* mcache for this buffer class */ | |
583 | TAILQ_HEAD(mcl_slhead, mcl_slab) mtbl_slablist; /* slab list */ | |
584 | mcache_obj_t *mtbl_cobjlist; /* composite objects freelist */ | |
585 | mb_class_stat_t *mtbl_stats; /* statistics fetchable via sysctl */ | |
586 | u_int32_t mtbl_maxsize; /* maximum buffer size */ | |
587 | int mtbl_minlimit; /* minimum allowed */ | |
588 | int mtbl_maxlimit; /* maximum allowed */ | |
589 | u_int32_t mtbl_wantpurge; /* purge during next reclaim */ | |
fe8ab488 | 590 | uint32_t mtbl_avgtotal; /* average total on iOS */ |
2d21ac55 A |
591 | } mbuf_table_t; |
592 | ||
593 | #define m_class(c) mbuf_table[c].mtbl_class | |
594 | #define m_cache(c) mbuf_table[c].mtbl_cache | |
595 | #define m_slablist(c) mbuf_table[c].mtbl_slablist | |
596 | #define m_cobjlist(c) mbuf_table[c].mtbl_cobjlist | |
597 | #define m_maxsize(c) mbuf_table[c].mtbl_maxsize | |
598 | #define m_minlimit(c) mbuf_table[c].mtbl_minlimit | |
599 | #define m_maxlimit(c) mbuf_table[c].mtbl_maxlimit | |
600 | #define m_wantpurge(c) mbuf_table[c].mtbl_wantpurge | |
fe8ab488 | 601 | #define m_avgtotal(c) mbuf_table[c].mtbl_avgtotal |
2d21ac55 A |
602 | #define m_cname(c) mbuf_table[c].mtbl_stats->mbcl_cname |
603 | #define m_size(c) mbuf_table[c].mtbl_stats->mbcl_size | |
604 | #define m_total(c) mbuf_table[c].mtbl_stats->mbcl_total | |
605 | #define m_active(c) mbuf_table[c].mtbl_stats->mbcl_active | |
606 | #define m_infree(c) mbuf_table[c].mtbl_stats->mbcl_infree | |
607 | #define m_slab_cnt(c) mbuf_table[c].mtbl_stats->mbcl_slab_cnt | |
608 | #define m_alloc_cnt(c) mbuf_table[c].mtbl_stats->mbcl_alloc_cnt | |
609 | #define m_free_cnt(c) mbuf_table[c].mtbl_stats->mbcl_free_cnt | |
610 | #define m_notified(c) mbuf_table[c].mtbl_stats->mbcl_notified | |
611 | #define m_purge_cnt(c) mbuf_table[c].mtbl_stats->mbcl_purge_cnt | |
612 | #define m_fail_cnt(c) mbuf_table[c].mtbl_stats->mbcl_fail_cnt | |
613 | #define m_ctotal(c) mbuf_table[c].mtbl_stats->mbcl_ctotal | |
fe8ab488 A |
614 | #define m_peak(c) mbuf_table[c].mtbl_stats->mbcl_peak_reported |
615 | #define m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt | |
2d21ac55 A |
616 | |
617 | static mbuf_table_t mbuf_table[] = { | |
618 | /* | |
619 | * The caches for mbufs, regular clusters and big clusters. | |
fe8ab488 A |
620 | * The average total values were based on data gathered from actual |
621 | * usage patterns on iOS. | |
2d21ac55 A |
622 | */ |
623 | { MC_MBUF, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_MBUF)), | |
fe8ab488 | 624 | NULL, NULL, 0, 0, 0, 0, 3000 }, |
2d21ac55 | 625 | { MC_CL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_CL)), |
fe8ab488 | 626 | NULL, NULL, 0, 0, 0, 0, 2000 }, |
2d21ac55 | 627 | { MC_BIGCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_BIGCL)), |
fe8ab488 | 628 | NULL, NULL, 0, 0, 0, 0, 1000 }, |
2d21ac55 | 629 | { MC_16KCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_16KCL)), |
fe8ab488 | 630 | NULL, NULL, 0, 0, 0, 0, 1000 }, |
2d21ac55 A |
631 | /* |
632 | * The following are special caches; they serve as intermediate | |
633 | * caches backed by the above rudimentary caches. Each object | |
634 | * in the cache is an mbuf with a cluster attached to it. Unlike | |
635 | * the above caches, these intermediate caches do not directly | |
636 | * deal with the slab structures; instead, the constructed | |
637 | * cached elements are simply stored in the freelists. | |
638 | */ | |
fe8ab488 A |
639 | { MC_MBUF_CL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 2000 }, |
640 | { MC_MBUF_BIGCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000 }, | |
641 | { MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000 }, | |
2d21ac55 A |
642 | }; |
643 | ||
644 | #define NELEM(a) (sizeof (a) / sizeof ((a)[0])) | |
645 | ||
646 | static void *mb_waitchan = &mbuf_table; /* wait channel for all caches */ | |
6d2010ae A |
647 | static int mb_waiters; /* number of waiters */ |
648 | ||
fe8ab488 A |
649 | boolean_t mb_peak_newreport = FALSE; |
650 | boolean_t mb_peak_firstreport = FALSE; | |
651 | ||
652 | /* generate a report by default after 1 week of uptime */ | |
653 | #define MBUF_PEAK_FIRST_REPORT_THRESHOLD 604800 | |
654 | ||
6d2010ae A |
655 | #define MB_WDT_MAXTIME 10 /* # of secs before watchdog panic */ |
656 | static struct timeval mb_wdtstart; /* watchdog start timestamp */ | |
316670eb A |
657 | static char *mbuf_dump_buf; |
658 | ||
659 | #define MBUF_DUMP_BUF_SIZE 2048 | |
6d2010ae A |
660 | |
661 | /* | |
662 | * The mbuf watchdog is enabled by default on embedded platforms. It is |
663 | * also toggleable via the kern.ipc.mb_watchdog sysctl. |
fe8ab488 A |
664 | * Garbage collection is also enabled by default on embedded platforms. |
665 | * mb_drain_maxint controls the amount of time to wait (in seconds) between |
666 | * consecutive calls to m_drain(). |
6d2010ae | 667 | */ |
6d2010ae | 668 | static unsigned int mb_watchdog = 0; |
fe8ab488 | 669 | static unsigned int mb_drain_maxint = 0; |
39236c6e A |
670 | |
671 | /* Red zone */ | |
672 | static u_int32_t mb_redzone_cookie; | |
673 | static void m_redzone_init(struct mbuf *); | |
674 | static void m_redzone_verify(struct mbuf *m); | |
2d21ac55 A |
675 | |
676 | /* The following are used to serialize m_clalloc() */ | |
677 | static boolean_t mb_clalloc_busy; | |
678 | static void *mb_clalloc_waitchan = &mb_clalloc_busy; | |
679 | static int mb_clalloc_waiters; | |
680 | ||
6d2010ae | 681 | static void mbuf_mtypes_sync(boolean_t); |
2d21ac55 | 682 | static int mbstat_sysctl SYSCTL_HANDLER_ARGS; |
6d2010ae | 683 | static void mbuf_stat_sync(void); |
2d21ac55 | 684 | static int mb_stat_sysctl SYSCTL_HANDLER_ARGS; |
6d2010ae A |
685 | static int mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS; |
686 | static int mleak_table_sysctl SYSCTL_HANDLER_ARGS; | |
687 | static char *mbuf_dump(void); | |
2d21ac55 A |
688 | static void mbuf_table_init(void); |
689 | static inline void m_incref(struct mbuf *); | |
39037602 | 690 | static inline u_int16_t m_decref(struct mbuf *); |
2d21ac55 A |
691 | static int m_clalloc(const u_int32_t, const int, const u_int32_t); |
692 | static void mbuf_worker_thread_init(void); | |
693 | static mcache_obj_t *slab_alloc(mbuf_class_t, int); | |
694 | static void slab_free(mbuf_class_t, mcache_obj_t *); | |
695 | static unsigned int mbuf_slab_alloc(void *, mcache_obj_t ***, | |
696 | unsigned int, int); | |
697 | static void mbuf_slab_free(void *, mcache_obj_t *, int); | |
698 | static void mbuf_slab_audit(void *, mcache_obj_t *, boolean_t); | |
699 | static void mbuf_slab_notify(void *, u_int32_t); | |
700 | static unsigned int cslab_alloc(mbuf_class_t, mcache_obj_t ***, | |
701 | unsigned int); | |
702 | static unsigned int cslab_free(mbuf_class_t, mcache_obj_t *, int); | |
703 | static unsigned int mbuf_cslab_alloc(void *, mcache_obj_t ***, | |
704 | unsigned int, int); | |
705 | static void mbuf_cslab_free(void *, mcache_obj_t *, int); | |
706 | static void mbuf_cslab_audit(void *, mcache_obj_t *, boolean_t); | |
707 | static int freelist_populate(mbuf_class_t, unsigned int, int); | |
6d2010ae | 708 | static void freelist_init(mbuf_class_t); |
2d21ac55 A |
709 | static boolean_t mbuf_cached_above(mbuf_class_t, int); |
710 | static boolean_t mbuf_steal(mbuf_class_t, unsigned int); | |
711 | static void m_reclaim(mbuf_class_t, unsigned int, boolean_t); | |
712 | static int m_howmany(int, size_t); | |
713 | static void mbuf_worker_thread(void); | |
6d2010ae | 714 | static void mbuf_watchdog(void); |
2d21ac55 A |
715 | static boolean_t mbuf_sleep(mbuf_class_t, unsigned int, int); |
716 | ||
717 | static void mcl_audit_init(void *, mcache_audit_t **, mcache_obj_t **, | |
718 | size_t, unsigned int); | |
fe8ab488 | 719 | static void mcl_audit_free(void *, unsigned int); |
2d21ac55 A |
720 | static mcache_audit_t *mcl_audit_buf2mca(mbuf_class_t, mcache_obj_t *); |
721 | static void mcl_audit_mbuf(mcache_audit_t *, void *, boolean_t, boolean_t); | |
722 | static void mcl_audit_cluster(mcache_audit_t *, void *, size_t, boolean_t, | |
723 | boolean_t); | |
724 | static void mcl_audit_restore_mbuf(struct mbuf *, mcache_audit_t *, boolean_t); | |
725 | static void mcl_audit_save_mbuf(struct mbuf *, mcache_audit_t *); | |
39236c6e | 726 | static void mcl_audit_scratch(mcache_audit_t *); |
2d21ac55 A |
727 | static void mcl_audit_mcheck_panic(struct mbuf *); |
728 | static void mcl_audit_verify_nextptr(void *, mcache_audit_t *); | |
729 | ||
6d2010ae A |
730 | static void mleak_activate(void); |
731 | static void mleak_logger(u_int32_t, mcache_obj_t *, boolean_t); | |
732 | static boolean_t mleak_log(uintptr_t *, mcache_obj_t *, uint32_t, int); | |
733 | static void mleak_free(mcache_obj_t *); | |
316670eb A |
734 | static void mleak_sort_traces(void); |
735 | static void mleak_update_stats(void); | |
6d2010ae | 736 | |
2d21ac55 A |
737 | static mcl_slab_t *slab_get(void *); |
738 | static void slab_init(mcl_slab_t *, mbuf_class_t, u_int32_t, | |
739 | void *, void *, unsigned int, int, int); | |
740 | static void slab_insert(mcl_slab_t *, mbuf_class_t); | |
741 | static void slab_remove(mcl_slab_t *, mbuf_class_t); | |
742 | static boolean_t slab_inrange(mcl_slab_t *, void *); | |
743 | static void slab_nextptr_panic(mcl_slab_t *, void *); | |
744 | static void slab_detach(mcl_slab_t *); | |
745 | static boolean_t slab_is_detached(mcl_slab_t *); | |
746 | ||
b0d623f7 A |
747 | static int m_copyback0(struct mbuf **, int, int, const void *, int, int); |
748 | static struct mbuf *m_split0(struct mbuf *, int, int, int); | |
fe8ab488 A |
749 | __private_extern__ void mbuf_report_peak_usage(void); |
750 | static boolean_t mbuf_report_usage(mbuf_class_t); | |
b0d623f7 A |
751 | |
752 | /* flags for m_copyback0 */ | |
753 | #define M_COPYBACK0_COPYBACK 0x0001 /* copyback from cp */ | |
754 | #define M_COPYBACK0_PRESERVE 0x0002 /* preserve original data */ | |
755 | #define M_COPYBACK0_COW 0x0004 /* do copy-on-write */ | |
756 | #define M_COPYBACK0_EXTEND 0x0008 /* extend chain */ | |
757 | ||
2d21ac55 A |
758 | /* |
759 | * This flag is set for all mbufs that come out of and into the composite | |
760 | * mbuf + cluster caches, i.e. MC_MBUF_CL and MC_MBUF_BIGCL. mbufs that | |
761 | * are marked with such a flag have clusters attached to them, and will be | |
762 | * treated differently when they are freed; instead of being placed back | |
763 | * into the mbuf and cluster freelists, the composite mbuf + cluster objects | |
764 | * are placed back into the appropriate composite cache's freelist, and the | |
765 | * actual freeing is deferred until the composite objects are purged. At | |
766 | * such a time, this flag will be cleared from the mbufs and the objects | |
767 | * will be freed into their own separate freelists. | |
768 | */ | |
769 | #define EXTF_COMPOSITE 0x1 | |
1c79356b | 770 | |
6d2010ae A |
771 | /* |
772 | * This flag indicates that the external cluster is read-only, i.e. it is | |
773 | * or was referred to by more than one mbuf. Once set, this flag is never |
774 | * cleared. | |
775 | */ | |
776 | #define EXTF_READONLY 0x2 | |
39037602 A |
777 | /* |
778 | * This flag indicates that the external cluster is paired with the mbuf. | |
779 | * Pairing implies an external free routine is defined, which will be invoked |
780 | * when the reference count drops to the minimum at m_free time. This | |
781 | * flag is never cleared. | |
782 | */ | |
783 | #define EXTF_PAIRED 0x4 | |
784 | ||
785 | #define EXTF_MASK \ | |
786 | (EXTF_COMPOSITE | EXTF_READONLY | EXTF_PAIRED) | |
6d2010ae | 787 | |
2d21ac55 | 788 | #define MEXT_RFA(m) ((m)->m_ext.ext_refflags) |
39037602 | 789 | #define MEXT_MINREF(m) (MEXT_RFA(m)->minref) |
2d21ac55 | 790 | #define MEXT_REF(m) (MEXT_RFA(m)->refcnt) |
39037602 | 791 | #define MEXT_PREF(m) (MEXT_RFA(m)->prefcnt) |
2d21ac55 | 792 | #define MEXT_FLAGS(m) (MEXT_RFA(m)->flags) |
39037602 A |
793 | #define MEXT_PRIV(m) (MEXT_RFA(m)->priv) |
794 | #define MEXT_PMBUF(m) (MEXT_RFA(m)->paired) | |
795 | #define MBUF_IS_COMPOSITE(m) \ | |
796 | (MEXT_REF(m) == MEXT_MINREF(m) && \ | |
797 | (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_COMPOSITE) | |
798 | /* | |
799 | * This macro can be used to test if the mbuf is paired to an external | |
800 | * cluster. The test for MEXT_PMBUF being equal to the mbuf in subject | |
801 | * is important, as EXTF_PAIRED alone is insufficient since it is immutable, | |
802 | * and thus survives calls to m_free_paired. | |
803 | */ | |
804 | #define MBUF_IS_PAIRED(m) \ | |
805 | (((m)->m_flags & M_EXT) && \ | |
806 | (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_PAIRED && \ | |
807 | MEXT_PMBUF(m) == (m)) | |
1c79356b | 808 | |
2d21ac55 A |
809 | /* |
810 | * Macros used to verify the integrity of the mbuf. | |
811 | */ | |
812 | #define _MCHECK(m) { \ | |
39037602 | 813 | if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) { \ |
2d21ac55 A |
814 | if (mclaudit == NULL) \ |
815 | panic("MCHECK: m_type=%d m=%p", \ | |
816 | (u_int16_t)(m)->m_type, m); \ | |
817 | else \ | |
818 | mcl_audit_mcheck_panic(m); \ | |
819 | } \ | |
820 | } | |
55e303ae | 821 | |
2d21ac55 | 822 | #define MBUF_IN_MAP(addr) \ |
3e170ce0 A |
823 | ((unsigned char *)(addr) >= mbutl && \ |
824 | (unsigned char *)(addr) < embutl) | |
55e303ae | 825 | |
2d21ac55 A |
826 | #define MRANGE(addr) { \ |
827 | if (!MBUF_IN_MAP(addr)) \ | |
828 | panic("MRANGE: address out of range 0x%p", addr); \ | |
1c79356b A |
829 | } |
830 | ||
831 | /* | |
2d21ac55 | 832 | * Macro version of mtod. |
1c79356b | 833 | */ |
2d21ac55 | 834 | #define MTOD(m, t) ((t)((m)->m_data)) |
1c79356b | 835 | |
2d21ac55 | 836 | /* |
3e170ce0 | 837 | * Macros to obtain page index given a base cluster address |
6d2010ae | 838 | */ |
3e170ce0 A |
839 | #define MTOPG(x) (((unsigned char *)x - mbutl) >> PAGE_SHIFT) |
840 | #define PGTOM(x) (mbutl + (x << PAGE_SHIFT)) | |
6d2010ae A |
841 | |
842 | /* | |
843 | * Macro to find the mbuf index relative to a base. | |
2d21ac55 | 844 | */ |
3e170ce0 A |
845 | #define MBPAGEIDX(c, m) \ |
846 | (((unsigned char *)(m) - (unsigned char *)(c)) >> MSIZESHIFT) | |
1c79356b | 847 | |
2d21ac55 | 848 | /* |
6d2010ae | 849 | * Same thing for 2KB cluster index. |
2d21ac55 | 850 | */ |
3e170ce0 A |
851 | #define CLPAGEIDX(c, m) \ |
852 | (((unsigned char *)(m) - (unsigned char *)(c)) >> MCLSHIFT) | |
853 | ||
854 | /* | |
855 | * Macro to find 4KB cluster index relative to a base | |
856 | */ | |
857 | #define BCLPAGEIDX(c, m) \ | |
858 | (((unsigned char *)(m) - (unsigned char *)(c)) >> MBIGCLSHIFT) | |
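/*
 * Editorial sketch, not part of the original source: composing the
 * macros above to locate the audit slot of an mbuf, following the
 * mclaudit[] diagram near the top of this file.  The complete lookup,
 * covering the cluster classes as well, is done by mcl_audit_buf2mca();
 * the function name here is hypothetical.
 */
static inline mcache_audit_t *
mbuf_audit_sketch(struct mbuf *m)
{
	unsigned int ix = MTOPG(m);		  /* page index into mclaudit[] */
	unsigned int k = MBPAGEIDX(PGTOM(ix), m); /* mbuf index within page */

	return (mclaudit[ix].cl_audit[k]);
}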
91447636 | 859 | |
2d21ac55 A |
860 | /* |
861 | * Macros used during mbuf and cluster initialization. | |
862 | */ | |
39236c6e A |
863 | #define MBUF_INIT_PKTHDR(m) { \ |
864 | (m)->m_pkthdr.rcvif = NULL; \ | |
865 | (m)->m_pkthdr.pkt_hdr = NULL; \ | |
866 | (m)->m_pkthdr.len = 0; \ | |
867 | (m)->m_pkthdr.csum_flags = 0; \ | |
868 | (m)->m_pkthdr.csum_data = 0; \ | |
869 | (m)->m_pkthdr.vlan_tag = 0; \ | |
870 | m_classifier_init(m, 0); \ | |
871 | m_tag_init(m, 1); \ | |
872 | m_scratch_init(m); \ | |
873 | m_redzone_init(m); \ | |
874 | } | |
875 | ||
2d21ac55 A |
876 | #define MBUF_INIT(m, pkthdr, type) { \ |
877 | _MCHECK(m); \ | |
878 | (m)->m_next = (m)->m_nextpkt = NULL; \ | |
879 | (m)->m_len = 0; \ | |
880 | (m)->m_type = type; \ | |
881 | if ((pkthdr) == 0) { \ | |
882 | (m)->m_data = (m)->m_dat; \ | |
883 | (m)->m_flags = 0; \ | |
884 | } else { \ | |
885 | (m)->m_data = (m)->m_pktdat; \ | |
886 | (m)->m_flags = M_PKTHDR; \ | |
39236c6e | 887 | MBUF_INIT_PKTHDR(m); \ |
2d21ac55 A |
888 | } \ |
889 | } | |
91447636 | 890 | |
39037602 A |
891 | #define MEXT_INIT(m, buf, size, free, arg, rfa, min, ref, pref, flag, \ |
892 | priv, pm) { \ | |
2d21ac55 A |
893 | (m)->m_data = (m)->m_ext.ext_buf = (buf); \ |
894 | (m)->m_flags |= M_EXT; \ | |
895 | (m)->m_ext.ext_size = (size); \ | |
896 | (m)->m_ext.ext_free = (free); \ | |
897 | (m)->m_ext.ext_arg = (arg); \ | |
2d21ac55 | 898 | MEXT_RFA(m) = (rfa); \ |
39037602 | 899 | MEXT_MINREF(m) = (min); \ |
2d21ac55 | 900 | MEXT_REF(m) = (ref); \ |
39037602 | 901 | MEXT_PREF(m) = (pref); \ |
2d21ac55 | 902 | MEXT_FLAGS(m) = (flag); \ |
39037602 A |
903 | MEXT_PRIV(m) = (priv); \ |
904 | MEXT_PMBUF(m) = (pm); \ | |
1c79356b A |
905 | } |
906 | ||
2d21ac55 | 907 | #define MBUF_CL_INIT(m, buf, rfa, ref, flag) \ |
39037602 A |
908 | MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, 0, \ |
909 | ref, 0, flag, 0, NULL) | |
2d21ac55 A |
910 | |
911 | #define MBUF_BIGCL_INIT(m, buf, rfa, ref, flag) \ | |
39037602 A |
912 | MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, 0, \ |
913 | ref, 0, flag, 0, NULL) | |
2d21ac55 A |
914 | |
915 | #define MBUF_16KCL_INIT(m, buf, rfa, ref, flag) \ | |
39037602 A |
916 | MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, 0, \ |
917 | ref, 0, flag, 0, NULL) | |
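/*
 * Editorial sketch, not part of the original source: how the
 * initializers above compose when building a packet-header mbuf with a
 * 2KB cluster attached.  `m', `buf' and `rfa' are assumed to have been
 * obtained from their respective caches beforehand; the function name
 * is hypothetical.
 */
static inline void
mbuf_cl_attach_sketch(struct mbuf *m, void *buf, struct ext_ref *rfa)
{
	MBUF_INIT(m, 1, MT_DATA);	 /* pkthdr mbuf of type MT_DATA */
	MBUF_CL_INIT(m, buf, rfa, 1, 0); /* attach cluster, refcnt of 1 */
}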
2d21ac55 | 918 | |
1c79356b | 919 | /* |
2d21ac55 | 920 | * Macro to convert BSD malloc sleep flag to mcache's |
1c79356b | 921 | */ |
2d21ac55 | 922 | #define MSLEEPF(f) ((!((f) & M_DONTWAIT)) ? MCR_SLEEP : MCR_NOSLEEP) |
1c79356b | 923 | |
2d21ac55 A |
924 | /* |
925 | * The structure that holds all mbuf class statistics exportable via sysctl. | |
926 | * Similar to mbstat structure, the mb_stat structure is protected by the | |
927 | * global mbuf lock. It contains additional information about the classes | |
928 | * that allows for a more accurate view of the state of the allocator. | |
929 | */ | |
930 | struct mb_stat *mb_stat; | |
b0d623f7 | 931 | struct omb_stat *omb_stat; /* For backwards compatibility */ |
1c79356b | 932 | |
2d21ac55 A |
933 | #define MB_STAT_SIZE(n) \ |
934 | ((size_t)(&((mb_stat_t *)0)->mbs_class[n])) | |
b0d623f7 A |
935 | #define OMB_STAT_SIZE(n) \ |
936 | ((size_t)(&((struct omb_stat *)0)->mbs_class[n])) | |
1c79356b A |
937 | |
938 | /* | |
2d21ac55 A |
939 | * The legacy structure holding all of the mbuf allocation statistics. |
940 | * The actual statistics used by the kernel are stored in the mbuf_table | |
941 | * instead, and are updated atomically while the global mbuf lock is held. | |
942 | * They are mirrored in mbstat to support legacy applications (e.g. netstat). | |
943 | * Unlike before, the kernel no longer relies on the contents of mbstat for | |
944 | * its operations (e.g. cluster expansion) because the structure is exposed | |
945 | * to the outside and could possibly be modified, therefore making it unsafe. |
946 | * With the exception of the mbstat.m_mtypes array (see below), all of the | |
947 | * statistics are updated as they change. | |
1c79356b | 948 | */ |
2d21ac55 | 949 | struct mbstat mbstat; |
1c79356b | 950 | |
2d21ac55 A |
951 | #define MBSTAT_MTYPES_MAX \ |
952 | (sizeof (mbstat.m_mtypes) / sizeof (mbstat.m_mtypes[0])) | |
1c79356b A |
953 | |
954 | /* | |
2d21ac55 A |
955 | * Allocation statistics related to mbuf types (up to MT_MAX-1) are updated |
956 | * atomically and stored in a per-CPU structure which is lock-free; this is | |
957 | * done in order to avoid writing to the global mbstat data structure which | |
958 | * would cause false sharing. During sysctl request for kern.ipc.mbstat, | |
959 | * the statistics across all CPUs will be converged into the mbstat.m_mtypes | |
960 | * array and returned to the application. Any updates for types greater |
961 | * than or equal to MT_MAX are done atomically on mbstat; this slows down |
962 | * performance but is okay since the kernel uses only up to MT_MAX-1 while | |
963 | * anything beyond that (up to type 255) is considered a corner case. | |
1c79356b | 964 | */ |
2d21ac55 A |
965 | typedef struct { |
966 | unsigned int cpu_mtypes[MT_MAX]; | |
39236c6e | 967 | } __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE), packed)) mtypes_cpu_t; |
1c79356b | 968 | |
2d21ac55 A |
969 | typedef struct { |
970 | mtypes_cpu_t mbs_cpu[1]; | |
971 | } mbuf_mtypes_t; | |
1c79356b | 972 | |
2d21ac55 A |
973 | static mbuf_mtypes_t *mbuf_mtypes; /* per-CPU statistics */ |
974 | ||
975 | #define MBUF_MTYPES_SIZE(n) \ | |
976 | ((size_t)(&((mbuf_mtypes_t *)0)->mbs_cpu[n])) | |
977 | ||
978 | #define MTYPES_CPU(p) \ | |
316670eb | 979 | ((mtypes_cpu_t *)(void *)((char *)(p) + MBUF_MTYPES_SIZE(cpu_number()))) |
2d21ac55 | 980 | |
2d21ac55 A |
981 | #define mtype_stat_add(type, n) { \ |
982 | if ((unsigned)(type) < MT_MAX) { \ | |
983 | mtypes_cpu_t *mbs = MTYPES_CPU(mbuf_mtypes); \ | |
984 | atomic_add_32(&mbs->cpu_mtypes[type], n); \ | |
6d2010ae A |
985 | } else if ((unsigned)(type) < (unsigned)MBSTAT_MTYPES_MAX) { \ |
986 | atomic_add_16((int16_t *)&mbstat.m_mtypes[type], n); \ | |
2d21ac55 | 987 | } \ |
1c79356b A |
988 | } |
989 | ||
2d21ac55 A |
990 | #define mtype_stat_sub(t, n) mtype_stat_add(t, -(n)) |
991 | #define mtype_stat_inc(t) mtype_stat_add(t, 1) | |
992 | #define mtype_stat_dec(t) mtype_stat_sub(t, 1) | |
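/*
 * Editorial sketch, not part of the original source: a typical use of
 * the statistics macros above.  Each update lands in the calling CPU's
 * lock-free mbs_cpu[] slot and is folded back into mbstat.m_mtypes[]
 * later by mbuf_mtypes_sync().  The function name is hypothetical.
 */
static inline void
mtype_transfer_sketch(int old_type, int new_type)
{
	mtype_stat_dec(old_type);	/* one fewer mbuf of the old type */
	mtype_stat_inc(new_type);	/* one more mbuf of the new type */
}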
91447636 | 993 | |
6d2010ae A |
994 | static void |
995 | mbuf_mtypes_sync(boolean_t locked) | |
2d21ac55 | 996 | { |
2d21ac55 A |
997 | int m, n; |
998 | mtypes_cpu_t mtc; | |
1c79356b | 999 | |
6d2010ae A |
1000 | if (locked) |
1001 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
1002 | ||
2d21ac55 A |
1003 | bzero(&mtc, sizeof (mtc)); |
1004 | for (m = 0; m < ncpu; m++) { | |
1005 | mtypes_cpu_t *scp = &mbuf_mtypes->mbs_cpu[m]; | |
1006 | mtypes_cpu_t temp; | |
9bccf70c | 1007 | |
2d21ac55 A |
1008 | bcopy(&scp->cpu_mtypes, &temp.cpu_mtypes, |
1009 | sizeof (temp.cpu_mtypes)); | |
91447636 | 1010 | |
2d21ac55 A |
1011 | for (n = 0; n < MT_MAX; n++) |
1012 | mtc.cpu_mtypes[n] += temp.cpu_mtypes[n]; | |
1013 | } | |
6d2010ae A |
1014 | if (!locked) |
1015 | lck_mtx_lock(mbuf_mlock); | |
2d21ac55 A |
1016 | for (n = 0; n < MT_MAX; n++) |
1017 | mbstat.m_mtypes[n] = mtc.cpu_mtypes[n]; | |
6d2010ae A |
1018 | if (!locked) |
1019 | lck_mtx_unlock(mbuf_mlock); | |
1c79356b A |
1020 | } |
1021 | ||
2d21ac55 | 1022 | static int |
6d2010ae | 1023 | mbstat_sysctl SYSCTL_HANDLER_ARGS |
1c79356b | 1024 | { |
2d21ac55 | 1025 | #pragma unused(oidp, arg1, arg2) |
6d2010ae A |
1026 | mbuf_mtypes_sync(FALSE); |
1027 | ||
1028 | return (SYSCTL_OUT(req, &mbstat, sizeof (mbstat))); | |
1029 | } | |
1030 | ||
1031 | static void | |
1032 | mbuf_stat_sync(void) | |
1033 | { | |
2d21ac55 | 1034 | mb_class_stat_t *sp; |
6d2010ae A |
1035 | mcache_cpu_t *ccp; |
1036 | mcache_t *cp; | |
1037 | int k, m, bktsize; | |
1038 | ||
1039 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2d21ac55 | 1040 | |
2d21ac55 A |
1041 | for (k = 0; k < NELEM(mbuf_table); k++) { |
1042 | cp = m_cache(k); | |
1043 | ccp = &cp->mc_cpu[0]; | |
1044 | bktsize = ccp->cc_bktsize; | |
1045 | sp = mbuf_table[k].mtbl_stats; | |
1046 | ||
1047 | if (cp->mc_flags & MCF_NOCPUCACHE) | |
1048 | sp->mbcl_mc_state = MCS_DISABLED; | |
1049 | else if (cp->mc_purge_cnt > 0) | |
1050 | sp->mbcl_mc_state = MCS_PURGING; | |
1051 | else if (bktsize == 0) | |
1052 | sp->mbcl_mc_state = MCS_OFFLINE; | |
1053 | else | |
1054 | sp->mbcl_mc_state = MCS_ONLINE; | |
1055 | ||
1056 | sp->mbcl_mc_cached = 0; | |
1057 | for (m = 0; m < ncpu; m++) { | |
1058 | ccp = &cp->mc_cpu[m]; | |
1059 | if (ccp->cc_objs > 0) | |
1060 | sp->mbcl_mc_cached += ccp->cc_objs; | |
1061 | if (ccp->cc_pobjs > 0) | |
1062 | sp->mbcl_mc_cached += ccp->cc_pobjs; | |
1063 | } | |
1064 | sp->mbcl_mc_cached += (cp->mc_full.bl_total * bktsize); | |
1065 | sp->mbcl_active = sp->mbcl_total - sp->mbcl_mc_cached - | |
1066 | sp->mbcl_infree; | |
1067 | ||
1068 | sp->mbcl_mc_waiter_cnt = cp->mc_waiter_cnt; | |
1069 | sp->mbcl_mc_wretry_cnt = cp->mc_wretry_cnt; | |
1070 | sp->mbcl_mc_nwretry_cnt = cp->mc_nwretry_cnt; | |
1071 | ||
1072 | /* Calculate total count specific to each class */ | |
1073 | sp->mbcl_ctotal = sp->mbcl_total; | |
1074 | switch (m_class(k)) { | |
1075 | case MC_MBUF: | |
1076 | /* Deduct mbufs used in composite caches */ | |
1077 | sp->mbcl_ctotal -= (m_total(MC_MBUF_CL) + | |
1078 | m_total(MC_MBUF_BIGCL)); | |
1079 | break; | |
91447636 | 1080 | |
2d21ac55 | 1081 | case MC_CL: |
6d2010ae A |
1082 | /* Deduct clusters used in composite cache */ |
1083 | sp->mbcl_ctotal -= m_total(MC_MBUF_CL); | |
2d21ac55 | 1084 | break; |
91447636 | 1085 | |
2d21ac55 A |
1086 | case MC_BIGCL: |
1087 | /* Deduct clusters used in composite cache */ | |
1088 | sp->mbcl_ctotal -= m_total(MC_MBUF_BIGCL); | |
1089 | break; | |
1c79356b | 1090 | |
2d21ac55 A |
1091 | case MC_16KCL: |
1092 | /* Deduct clusters used in composite cache */ | |
1093 | sp->mbcl_ctotal -= m_total(MC_MBUF_16KCL); | |
1094 | break; | |
1095 | ||
1096 | default: | |
1097 | break; | |
1098 | } | |
1099 | } | |
6d2010ae A |
1100 | } |
1101 | ||
1102 | static int | |
1103 | mb_stat_sysctl SYSCTL_HANDLER_ARGS | |
1104 | { | |
1105 | #pragma unused(oidp, arg1, arg2) | |
1106 | void *statp; | |
1107 | int k, statsz, proc64 = proc_is64bit(req->p); | |
1108 | ||
1109 | lck_mtx_lock(mbuf_mlock); | |
1110 | mbuf_stat_sync(); | |
b0d623f7 A |
1111 | |
1112 | if (!proc64) { | |
1113 | struct omb_class_stat *oc; | |
1114 | struct mb_class_stat *c; | |
1115 | ||
1116 | omb_stat->mbs_cnt = mb_stat->mbs_cnt; | |
1117 | oc = &omb_stat->mbs_class[0]; | |
1118 | c = &mb_stat->mbs_class[0]; | |
1119 | for (k = 0; k < omb_stat->mbs_cnt; k++, oc++, c++) { | |
1120 | (void) snprintf(oc->mbcl_cname, sizeof (oc->mbcl_cname), | |
1121 | "%s", c->mbcl_cname); | |
1122 | oc->mbcl_size = c->mbcl_size; | |
1123 | oc->mbcl_total = c->mbcl_total; | |
1124 | oc->mbcl_active = c->mbcl_active; | |
1125 | oc->mbcl_infree = c->mbcl_infree; | |
1126 | oc->mbcl_slab_cnt = c->mbcl_slab_cnt; | |
1127 | oc->mbcl_alloc_cnt = c->mbcl_alloc_cnt; | |
1128 | oc->mbcl_free_cnt = c->mbcl_free_cnt; | |
1129 | oc->mbcl_notified = c->mbcl_notified; | |
1130 | oc->mbcl_purge_cnt = c->mbcl_purge_cnt; | |
1131 | oc->mbcl_fail_cnt = c->mbcl_fail_cnt; | |
1132 | oc->mbcl_ctotal = c->mbcl_ctotal; | |
fe8ab488 | 1133 | oc->mbcl_release_cnt = c->mbcl_release_cnt; |
b0d623f7 A |
1134 | oc->mbcl_mc_state = c->mbcl_mc_state; |
1135 | oc->mbcl_mc_cached = c->mbcl_mc_cached; | |
1136 | oc->mbcl_mc_waiter_cnt = c->mbcl_mc_waiter_cnt; | |
1137 | oc->mbcl_mc_wretry_cnt = c->mbcl_mc_wretry_cnt; | |
1138 | oc->mbcl_mc_nwretry_cnt = c->mbcl_mc_nwretry_cnt; | |
1139 | } | |
1140 | statp = omb_stat; | |
1141 | statsz = OMB_STAT_SIZE(NELEM(mbuf_table)); | |
1142 | } else { | |
1143 | statp = mb_stat; | |
1144 | statsz = MB_STAT_SIZE(NELEM(mbuf_table)); | |
1145 | } | |
1146 | ||
2d21ac55 | 1147 | lck_mtx_unlock(mbuf_mlock); |
9bccf70c | 1148 | |
b0d623f7 | 1149 | return (SYSCTL_OUT(req, statp, statsz)); |
2d21ac55 | 1150 | } |
91447636 | 1151 | |
6d2010ae A |
1152 | static int |
1153 | mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS | |
1154 | { | |
1155 | #pragma unused(oidp, arg1, arg2) | |
6d2010ae A |
1156 | int i; |
1157 | ||
1158 | /* Ensure leak tracing turned on */ | |
316670eb | 1159 | if (!mclfindleak || !mclexpleak) |
6d2010ae A |
1160 | return (ENXIO); |
1161 | ||
6d2010ae | 1162 | lck_mtx_lock(mleak_lock); |
316670eb | 1163 | mleak_update_stats(); |
6d2010ae A |
1164 | i = SYSCTL_OUT(req, mleak_stat, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES)); |
1165 | lck_mtx_unlock(mleak_lock); | |
1166 | ||
1167 | return (i); | |
1168 | } | |
1169 | ||
1170 | static int | |
1171 | mleak_table_sysctl SYSCTL_HANDLER_ARGS | |
1172 | { | |
1173 | #pragma unused(oidp, arg1, arg2) | |
1174 | int i = 0; | |
1175 | ||
1176 | /* Ensure leak tracing turned on */ | |
316670eb | 1177 | if (!mclfindleak || !mclexpleak) |
6d2010ae A |
1178 | return (ENXIO); |
1179 | ||
1180 | lck_mtx_lock(mleak_lock); | |
1181 | i = SYSCTL_OUT(req, &mleak_table, sizeof (mleak_table)); | |
1182 | lck_mtx_unlock(mleak_lock); | |
1183 | ||
1184 | return (i); | |
1185 | } | |
1186 | ||
2d21ac55 A |
1187 | static inline void |
1188 | m_incref(struct mbuf *m) | |
1189 | { | |
39037602 A |
1190 | UInt16 old, new; |
1191 | volatile UInt16 *addr = (volatile UInt16 *)&MEXT_REF(m); | |
91447636 | 1192 | |
2d21ac55 A |
1193 | do { |
1194 | old = *addr; | |
1195 | new = old + 1; | |
1196 | ASSERT(new != 0); | |
39037602 | 1197 | } while (!OSCompareAndSwap16(old, new, addr)); |
6d2010ae A |
1198 | |
1199 | /* | |
1200 | * If cluster is shared, mark it with (sticky) EXTF_READONLY; | |
39037602 A |
1201 | * we don't clear the flag when the refcount goes back to the |
1202 | * minimum, to simplify code calling m_mclhasreference(). | |
6d2010ae | 1203 | */ |
39037602 A |
1204 | if (new > (MEXT_MINREF(m) + 1) && !(MEXT_FLAGS(m) & EXTF_READONLY)) |
1205 | (void) OSBitOrAtomic16(EXTF_READONLY, &MEXT_FLAGS(m)); | |
1c79356b A |
1206 | } |
1207 | ||
39037602 | 1208 | static inline u_int16_t |
2d21ac55 | 1209 | m_decref(struct mbuf *m) |
1c79356b | 1210 | { |
39037602 A |
1211 | UInt16 old, new; |
1212 | volatile UInt16 *addr = (volatile UInt16 *)&MEXT_REF(m); | |
1c79356b | 1213 | |
2d21ac55 A |
1214 | do { |
1215 | old = *addr; | |
1216 | new = old - 1; | |
1217 | ASSERT(old != 0); | |
39037602 | 1218 | } while (!OSCompareAndSwap16(old, new, addr)); |
2d21ac55 A |
1219 | |
1220 | return (new); | |
1c79356b A |
1221 | } |
1222 | ||
2d21ac55 A |
1223 | static void |
1224 | mbuf_table_init(void) | |
1c79356b | 1225 | { |
6d2010ae | 1226 | unsigned int b, c, s; |
3e170ce0 | 1227 | int m, config_mbuf_jumbo = 0; |
91447636 | 1228 | |
b0d623f7 A |
1229 | MALLOC(omb_stat, struct omb_stat *, OMB_STAT_SIZE(NELEM(mbuf_table)), |
1230 | M_TEMP, M_WAITOK | M_ZERO); | |
1231 | VERIFY(omb_stat != NULL); | |
1232 | ||
2d21ac55 A |
1233 | MALLOC(mb_stat, mb_stat_t *, MB_STAT_SIZE(NELEM(mbuf_table)), |
1234 | M_TEMP, M_WAITOK | M_ZERO); | |
1235 | VERIFY(mb_stat != NULL); | |
1c79356b | 1236 | |
2d21ac55 A |
1237 | mb_stat->mbs_cnt = NELEM(mbuf_table); |
1238 | for (m = 0; m < NELEM(mbuf_table); m++) | |
1239 | mbuf_table[m].mtbl_stats = &mb_stat->mbs_class[m]; | |
1c79356b | 1240 | |
2d21ac55 | 1241 | #if CONFIG_MBUF_JUMBO |
3e170ce0 | 1242 | config_mbuf_jumbo = 1; |
2d21ac55 | 1243 | #endif /* CONFIG_MBUF_JUMBO */ |
9bccf70c | 1244 | |
3e170ce0 A |
1245 | if (config_mbuf_jumbo == 1 || PAGE_SIZE == M16KCLBYTES) { |
1246 | /* | |
1247 | * Set aside 1/3 of the mbuf cluster map for jumbo | |
1248 | * clusters; we do this only on platforms where the jumbo | |
1249 | * cluster pool is enabled. | |
1250 | */ | |
1251 | njcl = nmbclusters / 3; | |
1252 | njclbytes = M16KCLBYTES; | |
1253 | } | |
1254 | ||
2d21ac55 | 1255 | /* |
6d2010ae A |
1256 | * nclusters holds both the 2KB and 4KB pools, so ensure it's |
1257 | * a multiple of 4KB clusters. | |
2d21ac55 | 1258 | */ |
3e170ce0 | 1259 | nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG); |
2d21ac55 A |
1260 | if (njcl > 0) { |
1261 | /* | |
6d2010ae A |
1262 | * Each jumbo cluster takes 8 2KB clusters, so make |
1263 | * sure that the pool size is evenly divisible by 8; | |
1264 | * njcl is in 2KB units, hence treated as such. | |
2d21ac55 | 1265 | */ |
3e170ce0 | 1266 | njcl = P2ROUNDDOWN(nmbclusters - nclusters, NCLPJCL); |
1c79356b | 1267 | |
6d2010ae | 1268 | /* Update nclusters with rounded down value of njcl */ |
3e170ce0 | 1269 | nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG); |
9bccf70c | 1270 | } |
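/*
 * Illustrative worked example (hypothetical numbers): with 4KB pages
 * (NCLPG = 2, NCLPJCL = 8) and nmbclusters = 32768, the computation
 * above yields njcl = 32768 / 3 = 10922, nclusters =
 * P2ROUNDDOWN(32768 - 10922, 2) = 21846, then njcl is re-rounded to
 * P2ROUNDDOWN(32768 - 21846, 8) = 10920 and nclusters becomes
 * P2ROUNDDOWN(32768 - 10920, 2) = 21848; 21848 + 10920 == 32768.
 */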
2d21ac55 A |
1271 | |
1272 | /* | |
3e170ce0 A |
1273 | * njcl is valid only on platforms with 16KB jumbo clusters or |
1274 | * with 16KB pages, where it is configured to 1/3 of the pool | |
1275 | * size. On these platforms, the remainder is used for 2KB | |
1276 | * and 4KB clusters. On platforms without 16KB jumbo clusters, | |
1277 | * the entire pool is used for both 2KB and 4KB clusters. A 4KB | |
1278 | * cluster can be split into either 16 mbufs or 2 2KB | |
1279 | * clusters. | |
6d2010ae A |
1280 | * |
1281 | * +---+---+------------ ... -----------+------- ... -------+ | |
1282 | * | c | b | s | njcl | | |
1283 | * +---+---+------------ ... -----------+------- ... -------+ | |
1284 | * | |
1285 | * 1/32nd of the shared region is reserved for pure 2KB and 4KB | |
1286 | * clusters (1/64th each.) | |
1287 | */ | |
3e170ce0 A |
1288 | c = P2ROUNDDOWN((nclusters >> 6), NCLPG); /* in 2KB unit */ |
1289 | b = P2ROUNDDOWN((nclusters >> (6 + NCLPBGSHIFT)), NBCLPG); /* in 4KB unit */ | |
6d2010ae A |
1290 | s = nclusters - (c + (b << NCLPBGSHIFT)); /* in 2KB unit */ |
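/*
 * Illustrative worked example (hypothetical numbers): with 4KB pages
 * (NCLPG = 2, NBCLPG = 1, NCLPBGSHIFT = 1), njcl = 0 and
 * nclusters = 32768 (64MB of 2KB clusters), this gives
 * c = P2ROUNDDOWN(32768 >> 6, 2) = 512 (2KB units, i.e. 1MB),
 * b = P2ROUNDDOWN(32768 >> 7, 1) = 256 (4KB units, i.e. 1MB), and
 * s = 32768 - (512 + 512) = 31744 (2KB units, i.e. 62MB).
 */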
1291 | ||
1292 | /* | |
1293 | * 1/64th (c) is reserved for 2KB clusters. | |
2d21ac55 | 1294 | */ |
6d2010ae A |
1295 | m_minlimit(MC_CL) = c; |
1296 | m_maxlimit(MC_CL) = s + c; /* in 2KB unit */ | |
2d21ac55 A |
1297 | m_maxsize(MC_CL) = m_size(MC_CL) = MCLBYTES; |
1298 | (void) snprintf(m_cname(MC_CL), MAX_MBUF_CNAME, "cl"); | |
1299 | ||
1300 | /* | |
6d2010ae A |
1301 | * Another 1/64th (b) of the map is reserved for 4KB clusters. |
1302 | * It cannot be turned into 2KB clusters or mbufs. | |
2d21ac55 | 1303 | */ |
6d2010ae A |
1304 | m_minlimit(MC_BIGCL) = b; |
1305 | m_maxlimit(MC_BIGCL) = (s >> NCLPBGSHIFT) + b; /* in 4KB unit */ | |
1306 | m_maxsize(MC_BIGCL) = m_size(MC_BIGCL) = MBIGCLBYTES; | |
1307 | (void) snprintf(m_cname(MC_BIGCL), MAX_MBUF_CNAME, "bigcl"); | |
2d21ac55 A |
1308 | |
1309 | /* | |
6d2010ae | 1310 | * The remaining 31/32nds (s) are all-purpose (mbufs, 2KB, or 4KB). | |
2d21ac55 | 1311 | */ |
6d2010ae A |
1312 | m_minlimit(MC_MBUF) = 0; |
1313 | m_maxlimit(MC_MBUF) = (s << NMBPCLSHIFT); /* in mbuf unit */ | |
1314 | m_maxsize(MC_MBUF) = m_size(MC_MBUF) = MSIZE; | |
1315 | (void) snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf"); | |
2d21ac55 A |
1316 | |
1317 | /* | |
1318 | * Set limits for the composite classes. | |
1319 | */ | |
1320 | m_minlimit(MC_MBUF_CL) = 0; | |
6d2010ae | 1321 | m_maxlimit(MC_MBUF_CL) = m_maxlimit(MC_CL); |
2d21ac55 A |
1322 | m_maxsize(MC_MBUF_CL) = MCLBYTES; |
1323 | m_size(MC_MBUF_CL) = m_size(MC_MBUF) + m_size(MC_CL); | |
1324 | (void) snprintf(m_cname(MC_MBUF_CL), MAX_MBUF_CNAME, "mbuf_cl"); | |
1325 | ||
1326 | m_minlimit(MC_MBUF_BIGCL) = 0; | |
1327 | m_maxlimit(MC_MBUF_BIGCL) = m_maxlimit(MC_BIGCL); | |
6d2010ae | 1328 | m_maxsize(MC_MBUF_BIGCL) = MBIGCLBYTES; |
2d21ac55 A |
1329 | m_size(MC_MBUF_BIGCL) = m_size(MC_MBUF) + m_size(MC_BIGCL); |
1330 | (void) snprintf(m_cname(MC_MBUF_BIGCL), MAX_MBUF_CNAME, "mbuf_bigcl"); | |
1331 | ||
1332 | /* | |
1333 | * And for jumbo classes. | |
1334 | */ | |
1335 | m_minlimit(MC_16KCL) = 0; | |
6d2010ae | 1336 | m_maxlimit(MC_16KCL) = (njcl >> NCLPJCLSHIFT); /* in 16KB unit */ |
2d21ac55 A |
1337 | m_maxsize(MC_16KCL) = m_size(MC_16KCL) = M16KCLBYTES; |
1338 | (void) snprintf(m_cname(MC_16KCL), MAX_MBUF_CNAME, "16kcl"); | |
1339 | ||
1340 | m_minlimit(MC_MBUF_16KCL) = 0; | |
1341 | m_maxlimit(MC_MBUF_16KCL) = m_maxlimit(MC_16KCL); | |
1342 | m_maxsize(MC_MBUF_16KCL) = M16KCLBYTES; | |
1343 | m_size(MC_MBUF_16KCL) = m_size(MC_MBUF) + m_size(MC_16KCL); | |
1344 | (void) snprintf(m_cname(MC_MBUF_16KCL), MAX_MBUF_CNAME, "mbuf_16kcl"); | |
1345 | ||
1346 | /* | |
1347 | * Initialize the legacy mbstat structure. | |
1348 | */ | |
1349 | bzero(&mbstat, sizeof (mbstat)); | |
1350 | mbstat.m_msize = m_maxsize(MC_MBUF); | |
1351 | mbstat.m_mclbytes = m_maxsize(MC_CL); | |
1352 | mbstat.m_minclsize = MINCLSIZE; | |
1353 | mbstat.m_mlen = MLEN; | |
1354 | mbstat.m_mhlen = MHLEN; | |
1355 | mbstat.m_bigmclbytes = m_maxsize(MC_BIGCL); | |
1356 | } | |
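/*
 * Illustrative sketch (not the kernel's actual definitions): the
 * power-of-2 rounding macros used throughout mbuf_table_init() behave
 * like the following, assuming "align" is a power of 2:
 *
 *	#define MY_P2ROUNDDOWN(x, align) ((x) & ~((align) - 1))
 *	#define MY_P2ROUNDUP(x, align) \
 *		(((x) + (align) - 1) & ~((align) - 1))
 *
 * e.g. MY_P2ROUNDDOWN(10922, 8) == 10920 and MY_P2ROUNDUP(10922, 8) == 10928.
 */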
1357 | ||
b0d623f7 A |
1358 | #if defined(__LP64__) |
1359 | typedef struct ncl_tbl { | |
1360 | uint64_t nt_maxmem; /* memory (sane) size */ | |
1361 | uint32_t nt_mbpool; /* mbuf pool size */ | |
1362 | } ncl_tbl_t; | |
1363 | ||
1364 | /* Non-server */ | |
1365 | static ncl_tbl_t ncl_table[] = { | |
316670eb | 1366 | { (1ULL << GBSHIFT) /* 1 GB */, (64 << MBSHIFT) /* 64 MB */ }, |
b0d623f7 A |
1367 | { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (96 << MBSHIFT) /* 96 MB */ }, |
1368 | { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (128 << MBSHIFT) /* 128 MB */ }, | |
1369 | { 0, 0 } | |
1370 | }; | |
1371 | ||
1372 | /* Server */ | |
1373 | static ncl_tbl_t ncl_table_srv[] = { | |
316670eb | 1374 | { (1ULL << GBSHIFT) /* 1 GB */, (96 << MBSHIFT) /* 96 MB */ }, |
b0d623f7 A |
1375 | { (1ULL << (GBSHIFT + 2)) /* 4 GB */, (128 << MBSHIFT) /* 128 MB */ }, |
1376 | { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (160 << MBSHIFT) /* 160 MB */ }, | |
1377 | { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (192 << MBSHIFT) /* 192 MB */ }, | |
1378 | { (1ULL << (GBSHIFT + 5)) /* 32 GB */, (256 << MBSHIFT) /* 256 MB */ }, | |
1379 | { (1ULL << (GBSHIFT + 6)) /* 64 GB */, (384 << MBSHIFT) /* 384 MB */ }, | |
1380 | { 0, 0 } | |
1381 | }; | |
1382 | #endif /* __LP64__ */ | |
1383 | ||
1384 | __private_extern__ unsigned int | |
6d2010ae | 1385 | mbuf_default_ncl(int server, uint64_t mem) |
b0d623f7 A |
1386 | { |
1387 | #if !defined(__LP64__) | |
6d2010ae | 1388 | #pragma unused(server) |
b0d623f7 A |
1389 | unsigned int n; |
1390 | /* | |
1391 | * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM). | |
1392 | */ | |
6d2010ae A |
1393 | if ((n = ((mem / 16) / MCLBYTES)) > 32768) |
1394 | n = 32768; | |
b0d623f7 A |
1395 | #else |
1396 | unsigned int n, i; | |
6d2010ae | 1397 | ncl_tbl_t *tbl = (server ? ncl_table_srv : ncl_table); |
b0d623f7 A |
1398 | /* |
1399 | * 64-bit kernel (mbuf pool size based on table). | |
1400 | */ | |
1401 | n = tbl[0].nt_mbpool; | |
1402 | for (i = 0; tbl[i].nt_mbpool != 0; i++) { | |
1403 | if (mem < tbl[i].nt_maxmem) | |
1404 | break; | |
1405 | n = tbl[i].nt_mbpool; | |
1406 | } | |
1407 | n >>= MCLSHIFT; | |
1408 | #endif /* !__LP64__ */ | |
1409 | return (n); | |
1410 | } | |
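/*
 * Illustrative, self-contained sketch (a hypothetical standalone program,
 * not part of this file) of the table walk performed by mbuf_default_ncl()
 * above: keep the pool size of the last row whose memory threshold has
 * been reached, and stop at the first row that is still above the
 * machine's memory size.  The kernel then converts the byte size into a
 * count of 2KB clusters via n >>= MCLSHIFT.
 */
#include <stdint.h>
#include <stdio.h>

struct ncl_ent {
	uint64_t maxmem;	/* memory threshold at which this row applies */
	uint32_t mbpool;	/* mbuf pool size, in bytes */
};

static uint32_t
pool_for_mem(const struct ncl_ent *tbl, uint64_t mem)
{
	uint32_t n = tbl[0].mbpool;
	int i;

	for (i = 0; tbl[i].mbpool != 0; i++) {
		if (mem < tbl[i].maxmem)
			break;		/* the previous row applies */
		n = tbl[i].mbpool;
	}
	return (n);
}

int
main(void)
{
	const struct ncl_ent tbl[] = {
		{ 1ULL << 30, 64 << 20 },	/* 1 GB  -> 64 MB pool */
		{ 1ULL << 33, 96 << 20 },	/* 8 GB  -> 96 MB pool */
		{ 1ULL << 34, 128 << 20 },	/* 16 GB -> 128 MB pool */
		{ 0, 0 }
	};

	/* A 12 GB machine falls between the 8 GB and 16 GB rows: 96 MB. */
	printf("%u MB\n", pool_for_mem(tbl, 12ULL << 30) >> 20);
	return (0);
}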
1411 | ||
2d21ac55 A |
1412 | __private_extern__ void |
1413 | mbinit(void) | |
1414 | { | |
1415 | unsigned int m; | |
6d2010ae | 1416 | unsigned int initmcl = 0; |
2d21ac55 | 1417 | void *buf; |
b0d623f7 | 1418 | thread_t thread = THREAD_NULL; |
2d21ac55 | 1419 | |
39236c6e A |
1420 | microuptime(&mb_start); |
1421 | ||
316670eb A |
1422 | /* |
1423 | * These MBUF_ values must be equal to their private counterparts. | |
1424 | */ | |
1425 | _CASSERT(MBUF_EXT == M_EXT); | |
1426 | _CASSERT(MBUF_PKTHDR == M_PKTHDR); | |
1427 | _CASSERT(MBUF_EOR == M_EOR); | |
1428 | _CASSERT(MBUF_LOOP == M_LOOP); | |
1429 | _CASSERT(MBUF_BCAST == M_BCAST); | |
1430 | _CASSERT(MBUF_MCAST == M_MCAST); | |
1431 | _CASSERT(MBUF_FRAG == M_FRAG); | |
1432 | _CASSERT(MBUF_FIRSTFRAG == M_FIRSTFRAG); | |
1433 | _CASSERT(MBUF_LASTFRAG == M_LASTFRAG); | |
1434 | _CASSERT(MBUF_PROMISC == M_PROMISC); | |
1435 | _CASSERT(MBUF_HASFCS == M_HASFCS); | |
1436 | ||
1437 | _CASSERT(MBUF_TYPE_FREE == MT_FREE); | |
1438 | _CASSERT(MBUF_TYPE_DATA == MT_DATA); | |
1439 | _CASSERT(MBUF_TYPE_HEADER == MT_HEADER); | |
1440 | _CASSERT(MBUF_TYPE_SOCKET == MT_SOCKET); | |
1441 | _CASSERT(MBUF_TYPE_PCB == MT_PCB); | |
1442 | _CASSERT(MBUF_TYPE_RTABLE == MT_RTABLE); | |
1443 | _CASSERT(MBUF_TYPE_HTABLE == MT_HTABLE); | |
1444 | _CASSERT(MBUF_TYPE_ATABLE == MT_ATABLE); | |
1445 | _CASSERT(MBUF_TYPE_SONAME == MT_SONAME); | |
1446 | _CASSERT(MBUF_TYPE_SOOPTS == MT_SOOPTS); | |
1447 | _CASSERT(MBUF_TYPE_FTABLE == MT_FTABLE); | |
1448 | _CASSERT(MBUF_TYPE_RIGHTS == MT_RIGHTS); | |
1449 | _CASSERT(MBUF_TYPE_IFADDR == MT_IFADDR); | |
1450 | _CASSERT(MBUF_TYPE_CONTROL == MT_CONTROL); | |
1451 | _CASSERT(MBUF_TYPE_OOBDATA == MT_OOBDATA); | |
1452 | ||
1453 | _CASSERT(MBUF_TSO_IPV4 == CSUM_TSO_IPV4); | |
1454 | _CASSERT(MBUF_TSO_IPV6 == CSUM_TSO_IPV6); | |
39236c6e | 1455 | _CASSERT(MBUF_CSUM_REQ_SUM16 == CSUM_PARTIAL); |
316670eb A |
1456 | _CASSERT(MBUF_CSUM_TCP_SUM16 == MBUF_CSUM_REQ_SUM16); |
1457 | _CASSERT(MBUF_CSUM_REQ_IP == CSUM_IP); | |
1458 | _CASSERT(MBUF_CSUM_REQ_TCP == CSUM_TCP); | |
1459 | _CASSERT(MBUF_CSUM_REQ_UDP == CSUM_UDP); | |
1460 | _CASSERT(MBUF_CSUM_REQ_TCPIPV6 == CSUM_TCPIPV6); | |
1461 | _CASSERT(MBUF_CSUM_REQ_UDPIPV6 == CSUM_UDPIPV6); | |
1462 | _CASSERT(MBUF_CSUM_DID_IP == CSUM_IP_CHECKED); | |
1463 | _CASSERT(MBUF_CSUM_IP_GOOD == CSUM_IP_VALID); | |
1464 | _CASSERT(MBUF_CSUM_DID_DATA == CSUM_DATA_VALID); | |
1465 | _CASSERT(MBUF_CSUM_PSEUDO_HDR == CSUM_PSEUDO_HDR); | |
1466 | ||
1467 | _CASSERT(MBUF_WAITOK == M_WAIT); | |
1468 | _CASSERT(MBUF_DONTWAIT == M_DONTWAIT); | |
1469 | _CASSERT(MBUF_COPYALL == M_COPYALL); | |
1470 | ||
316670eb A |
1471 | _CASSERT(MBUF_SC2TC(MBUF_SC_BK_SYS) == MBUF_TC_BK); |
1472 | _CASSERT(MBUF_SC2TC(MBUF_SC_BK) == MBUF_TC_BK); | |
1473 | _CASSERT(MBUF_SC2TC(MBUF_SC_BE) == MBUF_TC_BE); | |
1474 | _CASSERT(MBUF_SC2TC(MBUF_SC_RD) == MBUF_TC_BE); | |
1475 | _CASSERT(MBUF_SC2TC(MBUF_SC_OAM) == MBUF_TC_BE); | |
1476 | _CASSERT(MBUF_SC2TC(MBUF_SC_AV) == MBUF_TC_VI); | |
1477 | _CASSERT(MBUF_SC2TC(MBUF_SC_RV) == MBUF_TC_VI); | |
1478 | _CASSERT(MBUF_SC2TC(MBUF_SC_VI) == MBUF_TC_VI); | |
1479 | _CASSERT(MBUF_SC2TC(MBUF_SC_VO) == MBUF_TC_VO); | |
1480 | _CASSERT(MBUF_SC2TC(MBUF_SC_CTL) == MBUF_TC_VO); | |
1481 | ||
1482 | _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BK) == SCVAL_BK); | |
1483 | _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BE) == SCVAL_BE); | |
1484 | _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VI) == SCVAL_VI); | |
1485 | _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VO) == SCVAL_VO); | |
1486 | ||
39236c6e A |
1487 | /* Module-specific scratch space (32-bit alignment requirement) */ | |
1488 | _CASSERT(!(offsetof(struct mbuf, m_pkthdr.pkt_mpriv) % | |
1489 | sizeof (uint32_t))); | |
1490 | ||
1491 | /* Initialize random red zone cookie value */ | |
1492 | _CASSERT(sizeof (mb_redzone_cookie) == | |
1493 | sizeof (((struct pkthdr *)0)->redzone)); | |
1494 | read_random(&mb_redzone_cookie, sizeof (mb_redzone_cookie)); | |
1495 | ||
1496 | /* Make sure we don't save more than we should */ | |
1497 | _CASSERT(MCA_SAVED_MBUF_SIZE <= sizeof (struct mbuf)); | |
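/*
 * Illustrative note (an assumed mechanism, not the kernel's actual
 * definition): compile-time assertions such as the _CASSERTs above are
 * commonly implemented so that a false predicate produces an invalid
 * type, failing the build instead of failing at run time, e.g.:
 *
 *	#define MY_CASSERT(x) \
 *		typedef char my_cassert_fail[(x) ? 1 : -1]
 */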
1498 | ||
2d21ac55 A |
1499 | if (nmbclusters == 0) |
1500 | nmbclusters = NMBCLUSTERS; | |
1501 | ||
6d2010ae A |
1502 | /* This should be a sane (at least even) value by now */ |
1503 | VERIFY(nmbclusters != 0 && !(nmbclusters & 0x1)); | |
1504 | ||
2d21ac55 A |
1505 | /* Setup the mbuf table */ |
1506 | mbuf_table_init(); | |
1507 | ||
1508 | /* Global lock for common layer */ | |
1509 | mbuf_mlock_grp_attr = lck_grp_attr_alloc_init(); | |
1510 | mbuf_mlock_grp = lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr); | |
1511 | mbuf_mlock_attr = lck_attr_alloc_init(); | |
316670eb | 1512 | lck_mtx_init(mbuf_mlock, mbuf_mlock_grp, mbuf_mlock_attr); |
2d21ac55 | 1513 | |
6d2010ae A |
1514 | /* |
1515 | * Allocate cluster slabs table: | |
1516 | * | |
1517 | * maxslabgrp = (N * 2048) / (1024 * 1024) | |
1518 | * | |
1519 | * Where N is nmbclusters rounded up to the nearest 512. This yields | |
1520 | * mcl_slabg_t units, each one representing 1 MB of memory. | |
1521 | */ | |
1522 | maxslabgrp = | |
3e170ce0 | 1523 | (P2ROUNDUP(nmbclusters, (MBSIZE >> MCLSHIFT)) << MCLSHIFT) >> MBSHIFT; |
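/*
 * Illustrative worked example (hypothetical numbers): with
 * nmbclusters = 32768 and MCLSHIFT = 11, P2ROUNDUP(32768, 512) = 32768,
 * (32768 << 11) = 64MB, and 64MB >> MBSHIFT = 64 slab groups, each one
 * covering 1MB of the cluster map.
 */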
2d21ac55 A |
1524 | MALLOC(slabstbl, mcl_slabg_t **, maxslabgrp * sizeof (mcl_slabg_t *), |
1525 | M_TEMP, M_WAITOK | M_ZERO); | |
1526 | VERIFY(slabstbl != NULL); | |
1527 | ||
6d2010ae A |
1528 | /* |
1529 | * Allocate audit structures, if needed: | |
1530 | * | |
3e170ce0 | 1531 | * maxclaudit = (maxslabgrp * 1024 * 1024) / PAGE_SIZE |
6d2010ae A |
1532 | * |
1533 | * This yields mcl_audit_t units, each one representing a page. | |
1534 | */ | |
593a1d5f | 1535 | PE_parse_boot_argn("mbuf_debug", &mbuf_debug, sizeof (mbuf_debug)); |
2d21ac55 | 1536 | mbuf_debug |= mcache_getflags(); |
6d2010ae | 1537 | if (mbuf_debug & MCF_DEBUG) { |
3e170ce0 A |
1538 | int l; |
1539 | mcl_audit_t *mclad; | |
1540 | maxclaudit = ((maxslabgrp << MBSHIFT) >> PAGE_SHIFT); | |
6d2010ae A |
1541 | MALLOC(mclaudit, mcl_audit_t *, maxclaudit * sizeof (*mclaudit), |
1542 | M_TEMP, M_WAITOK | M_ZERO); | |
2d21ac55 | 1543 | VERIFY(mclaudit != NULL); |
3e170ce0 A |
1544 | for (l = 0, mclad = mclaudit; l < maxclaudit; l++) { |
1545 | MALLOC(mclad[l].cl_audit, mcache_audit_t **, | |
1546 | NMBPG * sizeof(mcache_audit_t *), | |
1547 | M_TEMP, M_WAITOK | M_ZERO); | |
1548 | VERIFY(mclad[l].cl_audit != NULL); | |
1549 | } | |
2d21ac55 A |
1550 | |
1551 | mcl_audit_con_cache = mcache_create("mcl_audit_contents", | |
39236c6e | 1552 | AUDIT_CONTENTS_SIZE, sizeof (u_int64_t), 0, MCR_SLEEP); |
2d21ac55 A |
1553 | VERIFY(mcl_audit_con_cache != NULL); |
1554 | } | |
6d2010ae A |
1555 | mclverify = (mbuf_debug & MCF_VERIFY); |
1556 | mcltrace = (mbuf_debug & MCF_TRACE); | |
1557 | mclfindleak = !(mbuf_debug & MCF_NOLEAKLOG); | |
316670eb | 1558 | mclexpleak = mclfindleak && (mbuf_debug & MCF_EXPLEAKLOG); |
6d2010ae A |
1559 | |
1560 | /* Enable mbuf leak logging, with a lock to protect the tables */ | |
1561 | ||
1562 | mleak_lock_grp_attr = lck_grp_attr_alloc_init(); | |
1563 | mleak_lock_grp = lck_grp_alloc_init("mleak_lock", mleak_lock_grp_attr); | |
1564 | mleak_lock_attr = lck_attr_alloc_init(); | |
316670eb | 1565 | lck_mtx_init(mleak_lock, mleak_lock_grp, mleak_lock_attr); |
6d2010ae A |
1566 | |
1567 | mleak_activate(); | |
2d21ac55 A |
1568 | |
1569 | /* Calculate the number of pages assigned to the cluster pool */ | |
3e170ce0 | 1570 | mcl_pages = (nmbclusters << MCLSHIFT) / PAGE_SIZE; |
b0d623f7 A |
1571 | MALLOC(mcl_paddr, ppnum_t *, mcl_pages * sizeof (ppnum_t), |
1572 | M_TEMP, M_WAITOK); | |
2d21ac55 A |
1573 | VERIFY(mcl_paddr != NULL); |
1574 | ||
1575 | /* Register with the I/O Bus mapper */ | |
1576 | mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages); | |
b0d623f7 | 1577 | bzero((char *)mcl_paddr, mcl_pages * sizeof (ppnum_t)); |
2d21ac55 | 1578 | |
3e170ce0 A |
1579 | embutl = (mbutl + (nmbclusters * MCLBYTES)); |
1580 | VERIFY(((embutl - mbutl) % MBIGCLBYTES) == 0); | |
2d21ac55 | 1581 | |
6d2010ae | 1582 | /* Prime up the freelist */ |
593a1d5f | 1583 | PE_parse_boot_argn("initmcl", &initmcl, sizeof (initmcl)); |
6d2010ae A |
1584 | if (initmcl != 0) { |
1585 | initmcl >>= NCLPBGSHIFT; /* convert to 4K units */ | |
1586 | if (initmcl > m_maxlimit(MC_BIGCL)) | |
1587 | initmcl = m_maxlimit(MC_BIGCL); | |
1588 | } | |
1589 | if (initmcl < m_minlimit(MC_BIGCL)) | |
1590 | initmcl = m_minlimit(MC_BIGCL); | |
2d21ac55 A |
1591 | |
1592 | lck_mtx_lock(mbuf_mlock); | |
1593 | ||
6d2010ae A |
1594 | /* |
1595 | * For classes with non-zero minimum limits, populate their freelists | |
1596 | * so that m_total(class) is at least m_minlimit(class). | |
1597 | */ | |
1598 | VERIFY(m_total(MC_BIGCL) == 0 && m_minlimit(MC_BIGCL) != 0); | |
1599 | freelist_populate(m_class(MC_BIGCL), initmcl, M_WAIT); | |
1600 | VERIFY(m_total(MC_BIGCL) >= m_minlimit(MC_BIGCL)); | |
1601 | freelist_init(m_class(MC_CL)); | |
1602 | ||
1603 | for (m = 0; m < NELEM(mbuf_table); m++) { | |
1604 | /* Make sure we didn't miss any */ | |
1605 | VERIFY(m_minlimit(m_class(m)) == 0 || | |
1606 | m_total(m_class(m)) >= m_minlimit(m_class(m))); | |
fe8ab488 A |
1607 | |
1608 | /* populate the initial sizes and report from there on */ | |
1609 | m_peak(m_class(m)) = m_total(m_class(m)); | |
6d2010ae | 1610 | } |
fe8ab488 | 1611 | mb_peak_newreport = FALSE; |
2d21ac55 A |
1612 | |
1613 | lck_mtx_unlock(mbuf_mlock); | |
1614 | ||
6d2010ae A |
1615 | (void) kernel_thread_start((thread_continue_t)mbuf_worker_thread_init, |
1616 | NULL, &thread); | |
b0d623f7 | 1617 | thread_deallocate(thread); |
2d21ac55 A |
1618 | |
1619 | ref_cache = mcache_create("mext_ref", sizeof (struct ext_ref), | |
1620 | 0, 0, MCR_SLEEP); | |
1621 | ||
1622 | /* Create the cache for each class */ | |
1623 | for (m = 0; m < NELEM(mbuf_table); m++) { | |
6d2010ae | 1624 | void *allocfunc, *freefunc, *auditfunc, *logfunc; |
2d21ac55 A |
1625 | u_int32_t flags; |
1626 | ||
1627 | flags = mbuf_debug; | |
1628 | if (m_class(m) == MC_MBUF_CL || m_class(m) == MC_MBUF_BIGCL || | |
1629 | m_class(m) == MC_MBUF_16KCL) { | |
1630 | allocfunc = mbuf_cslab_alloc; | |
1631 | freefunc = mbuf_cslab_free; | |
1632 | auditfunc = mbuf_cslab_audit; | |
6d2010ae | 1633 | logfunc = mleak_logger; |
2d21ac55 A |
1634 | } else { |
1635 | allocfunc = mbuf_slab_alloc; | |
1636 | freefunc = mbuf_slab_free; | |
1637 | auditfunc = mbuf_slab_audit; | |
6d2010ae | 1638 | logfunc = mleak_logger; |
2d21ac55 A |
1639 | } |
1640 | ||
1641 | /* | |
1642 | * Disable per-CPU caches for jumbo classes if there | |
1643 | * is no jumbo cluster pool available in the system. | |
1644 | * The cache itself is still created (but will never | |
1645 | * be populated) since it simplifies the code. | |
1646 | */ | |
1647 | if ((m_class(m) == MC_MBUF_16KCL || m_class(m) == MC_16KCL) && | |
1648 | njcl == 0) | |
1649 | flags |= MCF_NOCPUCACHE; | |
1650 | ||
6d2010ae A |
1651 | if (!mclfindleak) |
1652 | flags |= MCF_NOLEAKLOG; | |
1653 | ||
2d21ac55 | 1654 | m_cache(m) = mcache_create_ext(m_cname(m), m_maxsize(m), |
6d2010ae | 1655 | allocfunc, freefunc, auditfunc, logfunc, mbuf_slab_notify, |
b0d623f7 | 1656 | (void *)(uintptr_t)m, flags, MCR_SLEEP); |
2d21ac55 A |
1657 | } |
1658 | ||
1659 | /* | |
1660 | * Allocate structure for per-CPU statistics that's aligned | |
1661 | * on the CPU cache boundary; this code assumes that we never | |
1662 | * uninitialize this framework, since the original address | |
1663 | * before alignment is not saved. | |
1664 | */ | |
1665 | ncpu = ml_get_max_cpus(); | |
39236c6e | 1666 | MALLOC(buf, void *, MBUF_MTYPES_SIZE(ncpu) + CPU_CACHE_LINE_SIZE, |
2d21ac55 A |
1667 | M_TEMP, M_WAITOK); |
1668 | VERIFY(buf != NULL); | |
1669 | ||
39236c6e A |
1670 | mbuf_mtypes = (mbuf_mtypes_t *)P2ROUNDUP((intptr_t)buf, |
1671 | CPU_CACHE_LINE_SIZE); | |
2d21ac55 A |
1672 | bzero(mbuf_mtypes, MBUF_MTYPES_SIZE(ncpu)); |
1673 | ||
6d2010ae A |
1674 | /* |
1675 | * Set the max limit on sb_max to be 1/16th of the size of | |
b0d623f7 A |
1676 | * memory allocated for mbuf clusters. |
1677 | */ | |
6d2010ae | 1678 | high_sb_max = (nmbclusters << (MCLSHIFT - 4)); |
b0d623f7 A |
1679 | if (high_sb_max < sb_max) { |
1680 | /* sb_max is too large for this configuration, scale it down */ | |
6d2010ae | 1681 | if (high_sb_max > (1 << MBSHIFT)) { |
b0d623f7 A |
1682 | /* We have at least 16 MB of mbuf pool */ | |
1683 | sb_max = high_sb_max; | |
1684 | } else if ((nmbclusters << MCLSHIFT) > (1 << MBSHIFT)) { | |
6d2010ae A |
1685 | /* |
1686 | * If we have more than 1MB of mbuf pool, cap the size of | |
b0d623f7 | 1687 | * the max socket buffer at 1MB. | |
6d2010ae | 1688 | */ |
b0d623f7 A |
1689 | sb_max = high_sb_max = (1 << MBSHIFT); |
1690 | } else { | |
1691 | sb_max = high_sb_max; | |
1692 | } | |
1693 | } | |
1694 | ||
316670eb A |
1695 | /* allocate space for mbuf_dump_buf */ |
1696 | MALLOC(mbuf_dump_buf, char *, MBUF_DUMP_BUF_SIZE, M_TEMP, M_WAITOK); | |
1697 | VERIFY(mbuf_dump_buf != NULL); | |
1698 | ||
39236c6e A |
1699 | if (mbuf_debug & MCF_DEBUG) { |
1700 | printf("%s: MLEN %d, MHLEN %d\n", __func__, | |
1701 | (int)_MLEN, (int)_MHLEN); | |
1702 | } | |
1703 | ||
1704 | printf("%s: done [%d MB total pool size, (%d/%d) split]\n", __func__, | |
6d2010ae A |
1705 | (nmbclusters << MCLSHIFT) >> MBSHIFT, |
1706 | (nclusters << MCLSHIFT) >> MBSHIFT, | |
1707 | (njcl << MCLSHIFT) >> MBSHIFT); | |
39037602 A |
1708 | |
1709 | /* initialize the lock for the tx completion callback table */ | |
1710 | mbuf_tx_compl_tbl_lck_grp_attr = lck_grp_attr_alloc_init(); | |
1711 | if (mbuf_tx_compl_tbl_lck_grp_attr == NULL) { | |
1712 | panic("%s: lck_grp_attr_alloc_init failed", __func__); | |
1713 | /* NOTREACHED */ | |
1714 | } | |
1715 | mbuf_tx_compl_tbl_lck_grp = lck_grp_alloc_init("mbuf_tx_compl_tbl", | |
1716 | mbuf_tx_compl_tbl_lck_grp_attr); | |
1717 | if (mbuf_tx_compl_tbl_lck_grp == NULL) { | |
1718 | panic("%s: lck_grp_alloc_init failed", __func__); | |
1719 | /* NOTREACHED */ | |
1720 | } | |
1721 | mbuf_tx_compl_tbl_lck_attr = lck_attr_alloc_init(); | |
1722 | if (mbuf_tx_compl_tbl_lck_attr == NULL) { | |
1723 | panic("%s: lck_attr_alloc_init failed", __func__); | |
1724 | /* NOTREACHED */ | |
1725 | } | |
1726 | lck_rw_init(mbuf_tx_compl_tbl_lock, mbuf_tx_compl_tbl_lck_grp, | |
1727 | mbuf_tx_compl_tbl_lck_attr); | |
1728 | ||
2d21ac55 A |
1729 | } |
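/*
 * Illustrative, self-contained sketch (hypothetical code, not part of
 * the original source) of the cache-line alignment trick used for
 * mbuf_mtypes in mbinit() above: over-allocate by one cache line, then
 * round the pointer up to the next line boundary.  The pre-alignment
 * address is not kept, so the buffer can never be freed -- acceptable
 * in mbinit() only because the stats framework is never torn down.
 */
#include <stdint.h>
#include <stdlib.h>

#define MY_CACHE_LINE	64	/* assumed cache line size */

static void *
alloc_cacheline_aligned(size_t size)
{
	/* The extra MY_CACHE_LINE bytes guarantee an aligned address. */
	uintptr_t raw = (uintptr_t)malloc(size + MY_CACHE_LINE);

	if (raw == 0)
		return (NULL);
	return ((void *)((raw + MY_CACHE_LINE - 1) &
	    ~(uintptr_t)(MY_CACHE_LINE - 1)));
}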
1730 | ||
1731 | /* | |
1732 | * Obtain a slab of object(s) from the class's freelist. | |
1733 | */ | |
1734 | static mcache_obj_t * | |
1735 | slab_alloc(mbuf_class_t class, int wait) | |
1736 | { | |
1737 | mcl_slab_t *sp; | |
1738 | mcache_obj_t *buf; | |
1739 | ||
1740 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
1741 | ||
2d21ac55 A |
1742 | /* This should always be NULL for us */ |
1743 | VERIFY(m_cobjlist(class) == NULL); | |
1744 | ||
1745 | /* | |
1746 | * Treat composite objects as having a longer lifespan by using | |
1747 | * a slab from the reverse direction, in the hope that this could | |
1748 | * reduce the probability of fragmentation for slabs that hold | |
1749 | * more than one buffer chunk (e.g. mbuf slabs). For other | |
1750 | * slabs, this probably doesn't make much of a difference. | |
1751 | */ | |
3e170ce0 A |
1752 | if ((class == MC_MBUF || class == MC_CL || class == MC_BIGCL) |
1753 | && (wait & MCR_COMP)) | |
2d21ac55 A |
1754 | sp = (mcl_slab_t *)TAILQ_LAST(&m_slablist(class), mcl_slhead); |
1755 | else | |
1756 | sp = (mcl_slab_t *)TAILQ_FIRST(&m_slablist(class)); | |
1757 | ||
1758 | if (sp == NULL) { | |
1759 | VERIFY(m_infree(class) == 0 && m_slab_cnt(class) == 0); | |
1760 | /* The slab list for this class is empty */ | |
1761 | return (NULL); | |
1762 | } | |
1763 | ||
1764 | VERIFY(m_infree(class) > 0); | |
1765 | VERIFY(!slab_is_detached(sp)); | |
1766 | VERIFY(sp->sl_class == class && | |
1767 | (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED); | |
1768 | buf = sp->sl_head; | |
1769 | VERIFY(slab_inrange(sp, buf) && sp == slab_get(buf)); | |
3e170ce0 A |
1770 | sp->sl_head = buf->obj_next; |
1771 | /* Increment slab reference */ | |
1772 | sp->sl_refcnt++; | |
1773 | ||
1774 | VERIFY(sp->sl_head != NULL || sp->sl_refcnt == sp->sl_chunks); | |
2d21ac55 | 1775 | |
2d21ac55 A |
1776 | if (sp->sl_head != NULL && !slab_inrange(sp, sp->sl_head)) { |
1777 | slab_nextptr_panic(sp, sp->sl_head); | |
1778 | /* In case sl_head is in the map but not in the slab */ | |
1779 | VERIFY(slab_inrange(sp, sp->sl_head)); | |
1780 | /* NOTREACHED */ | |
1781 | } | |
1782 | ||
2d21ac55 A |
1783 | if (mclaudit != NULL) { |
1784 | mcache_audit_t *mca = mcl_audit_buf2mca(class, buf); | |
1785 | mca->mca_uflags = 0; | |
1786 | /* Save contents on mbuf objects only */ | |
1787 | if (class == MC_MBUF) | |
1788 | mca->mca_uflags |= MB_SCVALID; | |
1789 | } | |
1790 | ||
1791 | if (class == MC_CL) { | |
1792 | mbstat.m_clfree = (--m_infree(MC_CL)) + m_infree(MC_MBUF_CL); | |
1793 | /* | |
3e170ce0 | 1794 | * A 2K cluster slab can have at most NCLPG references. |
2d21ac55 | 1795 | */ |
3e170ce0 A |
1796 | VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NCLPG && |
1797 | sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE); | |
1798 | VERIFY(sp->sl_refcnt < NCLPG || sp->sl_head == NULL); | |
2d21ac55 | 1799 | } else if (class == MC_BIGCL) { |
2d21ac55 A |
1800 | mbstat.m_bigclfree = (--m_infree(MC_BIGCL)) + |
1801 | m_infree(MC_MBUF_BIGCL); | |
1802 | /* | |
3e170ce0 | 1803 | * A 4K cluster slab can have at most NBCLPG references. | |
2d21ac55 | 1804 | */ |
3e170ce0 | 1805 | VERIFY(sp->sl_refcnt >= 1 && sp->sl_chunks == NBCLPG && |
39037602 | 1806 | sp->sl_len == PAGE_SIZE && |
3e170ce0 | 1807 | (sp->sl_refcnt < NBCLPG || sp->sl_head == NULL)); |
2d21ac55 A |
1808 | } else if (class == MC_16KCL) { |
1809 | mcl_slab_t *nsp; | |
1810 | int k; | |
1811 | ||
1812 | --m_infree(MC_16KCL); | |
1813 | VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 && | |
6d2010ae | 1814 | sp->sl_len == m_maxsize(class) && sp->sl_head == NULL); |
2d21ac55 | 1815 | /* |
6d2010ae A |
1816 | * Increment 2nd-Nth slab reference, where N is NSLABSP16KB. |
1817 | * A 16KB big cluster takes NSLABSP16KB slabs, each having at | |
1818 | * most 1 reference. | |
2d21ac55 | 1819 | */ |
6d2010ae | 1820 | for (nsp = sp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
1821 | nsp = nsp->sl_next; |
1822 | /* Next slab must already be present */ | |
1823 | VERIFY(nsp != NULL); | |
1824 | nsp->sl_refcnt++; | |
1825 | VERIFY(!slab_is_detached(nsp)); | |
1826 | VERIFY(nsp->sl_class == MC_16KCL && | |
1827 | nsp->sl_flags == (SLF_MAPPED | SLF_PARTIAL) && | |
1828 | nsp->sl_refcnt == 1 && nsp->sl_chunks == 0 && | |
1829 | nsp->sl_len == 0 && nsp->sl_base == sp->sl_base && | |
1830 | nsp->sl_head == NULL); | |
1831 | } | |
1832 | } else { | |
6d2010ae | 1833 | VERIFY(class == MC_MBUF); |
2d21ac55 A |
1834 | --m_infree(MC_MBUF); |
1835 | /* | |
1836 | * If auditing is turned on, this check is | |
1837 | * deferred until later in mbuf_slab_audit(). | |
1838 | */ | |
1839 | if (mclaudit == NULL) | |
1840 | _MCHECK((struct mbuf *)buf); | |
1841 | /* | |
1842 | * Since we have incremented the reference count above, | |
6d2010ae | 1843 | * an mbuf slab (formerly a 4KB cluster slab that was cut |
2d21ac55 | 1844 | * up into mbufs) must have a reference count between 1 |
3e170ce0 | 1845 | * and NMBPG at this point. |
2d21ac55 | 1846 | */ |
3e170ce0 A |
1847 | VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NMBPG && |
1848 | sp->sl_chunks == NMBPG && | |
1849 | sp->sl_len == PAGE_SIZE); | |
1850 | VERIFY(sp->sl_refcnt < NMBPG || sp->sl_head == NULL); | |
2d21ac55 A |
1851 | } |
1852 | ||
1853 | /* If empty, remove this slab from the class's freelist */ | |
1854 | if (sp->sl_head == NULL) { | |
3e170ce0 A |
1855 | VERIFY(class != MC_MBUF || sp->sl_refcnt == NMBPG); |
1856 | VERIFY(class != MC_CL || sp->sl_refcnt == NCLPG); | |
1857 | VERIFY(class != MC_BIGCL || sp->sl_refcnt == NBCLPG); | |
2d21ac55 A |
1858 | slab_remove(sp, class); |
1859 | } | |
1860 | ||
1861 | return (buf); | |
1862 | } | |
1863 | ||
1864 | /* | |
1865 | * Place a slab of object(s) back into a class's slab list. | |
1866 | */ | |
1867 | static void | |
1868 | slab_free(mbuf_class_t class, mcache_obj_t *buf) | |
1869 | { | |
1870 | mcl_slab_t *sp; | |
3e170ce0 A |
1871 | boolean_t reinit_supercl = false; |
1872 | mbuf_class_t super_class; | |
2d21ac55 A |
1873 | |
1874 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
1875 | ||
1876 | VERIFY(class != MC_16KCL || njcl > 0); | |
1877 | VERIFY(buf->obj_next == NULL); | |
3e170ce0 | 1878 | |
2d21ac55 A |
1879 | sp = slab_get(buf); |
1880 | VERIFY(sp->sl_class == class && slab_inrange(sp, buf) && | |
1881 | (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED); | |
1882 | ||
1883 | /* Decrement slab reference */ | |
1884 | sp->sl_refcnt--; | |
1885 | ||
6d2010ae | 1886 | if (class == MC_CL) { |
2d21ac55 A |
1887 | VERIFY(IS_P2ALIGNED(buf, MCLBYTES)); |
1888 | /* | |
6d2010ae A |
1889 | * A slab that has been split for 2KB clusters can have | |
1890 | * at most NCLPG - 1 outstanding references at this point. | |
1891 | */ | |
3e170ce0 A |
1892 | VERIFY(sp->sl_refcnt >= 0 && sp->sl_refcnt <= (NCLPG - 1) && |
1893 | sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE); | |
1894 | VERIFY(sp->sl_refcnt < (NCLPG - 1) || | |
6d2010ae A |
1895 | (slab_is_detached(sp) && sp->sl_head == NULL)); |
1896 | } else if (class == MC_BIGCL) { | |
3e170ce0 A |
1897 | VERIFY(IS_P2ALIGNED(buf, MBIGCLBYTES)); |
1898 | ||
1899 | /* A 4KB cluster slab can have NBCLPG references at most */ | |
1900 | VERIFY(sp->sl_refcnt >= 0 && sp->sl_chunks == NBCLPG); | |
1901 | VERIFY(sp->sl_refcnt < (NBCLPG - 1) || | |
1902 | (slab_is_detached(sp) && sp->sl_head == NULL)); | |
2d21ac55 A |
1903 | } else if (class == MC_16KCL) { |
1904 | mcl_slab_t *nsp; | |
1905 | int k; | |
1906 | /* | |
6d2010ae | 1907 | * A 16KB cluster takes NSLABSP16KB slabs, all of which must | |
2d21ac55 A |
1908 | * now have 0 references. | |
1909 | */ | |
3e170ce0 | 1910 | VERIFY(IS_P2ALIGNED(buf, PAGE_SIZE)); |
2d21ac55 | 1911 | VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 && |
6d2010ae | 1912 | sp->sl_len == m_maxsize(class) && sp->sl_head == NULL); |
2d21ac55 | 1913 | VERIFY(slab_is_detached(sp)); |
6d2010ae | 1914 | for (nsp = sp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
1915 | nsp = nsp->sl_next; |
1916 | /* Next slab must already be present */ | |
1917 | VERIFY(nsp != NULL); | |
1918 | nsp->sl_refcnt--; | |
1919 | VERIFY(slab_is_detached(nsp)); | |
1920 | VERIFY(nsp->sl_class == MC_16KCL && | |
1921 | (nsp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) && | |
1922 | nsp->sl_refcnt == 0 && nsp->sl_chunks == 0 && | |
1923 | nsp->sl_len == 0 && nsp->sl_base == sp->sl_base && | |
1924 | nsp->sl_head == NULL); | |
1925 | } | |
1926 | } else { | |
1927 | /* | |
3e170ce0 A |
1928 | * A slab that has been split for mbufs has at most | |
1929 | * NMBPG references. Since we have decremented | |
1930 | * one reference above, it must now be between 0 and | |
1931 | * NMBPG-1. | |
2d21ac55 | 1932 | */ |
6d2010ae | 1933 | VERIFY(class == MC_MBUF); |
3e170ce0 A |
1934 | VERIFY(sp->sl_refcnt >= 0 && |
1935 | sp->sl_refcnt <= (NMBPG - 1) && | |
1936 | sp->sl_chunks == NMBPG && | |
1937 | sp->sl_len == PAGE_SIZE); | |
1938 | VERIFY(sp->sl_refcnt < (NMBPG - 1) || | |
2d21ac55 A |
1939 | (slab_is_detached(sp) && sp->sl_head == NULL)); |
1940 | } | |
1941 | ||
1942 | /* | |
1943 | * When auditing is enabled, ensure that the buffer still | |
1944 | * contains the free pattern. Otherwise it got corrupted | |
1945 | * while at the CPU cache layer. | |
1946 | */ | |
1947 | if (mclaudit != NULL) { | |
1948 | mcache_audit_t *mca = mcl_audit_buf2mca(class, buf); | |
6d2010ae | 1949 | if (mclverify) { |
3e170ce0 A |
1950 | mcache_audit_free_verify(mca, buf, 0, |
1951 | m_maxsize(class)); | |
6d2010ae | 1952 | } |
2d21ac55 A |
1953 | mca->mca_uflags &= ~MB_SCVALID; |
1954 | } | |
1955 | ||
1956 | if (class == MC_CL) { | |
1957 | mbstat.m_clfree = (++m_infree(MC_CL)) + m_infree(MC_MBUF_CL); | |
6d2010ae | 1958 | buf->obj_next = sp->sl_head; |
2d21ac55 A |
1959 | } else if (class == MC_BIGCL) { |
1960 | mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) + | |
1961 | m_infree(MC_MBUF_BIGCL); | |
3e170ce0 | 1962 | buf->obj_next = sp->sl_head; |
2d21ac55 A |
1963 | } else if (class == MC_16KCL) { |
1964 | ++m_infree(MC_16KCL); | |
1965 | } else { | |
1966 | ++m_infree(MC_MBUF); | |
1967 | buf->obj_next = sp->sl_head; | |
1968 | } | |
1969 | sp->sl_head = buf; | |
1970 | ||
6d2010ae | 1971 | /* |
3e170ce0 A |
1972 | * If a slab has been split into 2KB clusters or into mbufs, | |
1973 | * turn it back into a single cluster of the super class, i.e. | |
1974 | * a 4KB or 16KB cluster depending on the page size. | |
6d2010ae | 1975 | */ |
3e170ce0 A |
1976 | if (m_maxsize(MC_BIGCL) == PAGE_SIZE) { |
1977 | super_class = MC_BIGCL; | |
1978 | } else { | |
1979 | VERIFY(PAGE_SIZE == m_maxsize(MC_16KCL)); | |
1980 | super_class = MC_16KCL; | |
1981 | } | |
6d2010ae | 1982 | if (class == MC_MBUF && sp->sl_refcnt == 0 && |
3e170ce0 A |
1983 | m_total(class) >= (m_minlimit(class) + NMBPG) && |
1984 | m_total(super_class) < m_maxlimit(super_class)) { | |
1985 | int i = NMBPG; | |
6d2010ae | 1986 | |
3e170ce0 | 1987 | m_total(MC_MBUF) -= NMBPG; |
2d21ac55 | 1988 | mbstat.m_mbufs = m_total(MC_MBUF); |
3e170ce0 A |
1989 | m_infree(MC_MBUF) -= NMBPG; |
1990 | mtype_stat_add(MT_FREE, -((unsigned)NMBPG)); | |
2d21ac55 A |
1991 | |
1992 | while (i--) { | |
1993 | struct mbuf *m = sp->sl_head; | |
1994 | VERIFY(m != NULL); | |
1995 | sp->sl_head = m->m_next; | |
1996 | m->m_next = NULL; | |
1997 | } | |
3e170ce0 | 1998 | reinit_supercl = true; |
6d2010ae | 1999 | } else if (class == MC_CL && sp->sl_refcnt == 0 && |
3e170ce0 A |
2000 | m_total(class) >= (m_minlimit(class) + NCLPG) && |
2001 | m_total(super_class) < m_maxlimit(super_class)) { | |
2002 | int i = NCLPG; | |
6d2010ae | 2003 | |
3e170ce0 | 2004 | m_total(MC_CL) -= NCLPG; |
6d2010ae | 2005 | mbstat.m_clusters = m_total(MC_CL); |
3e170ce0 | 2006 | m_infree(MC_CL) -= NCLPG; |
6d2010ae A |
2007 | |
2008 | while (i--) { | |
2009 | union mcluster *c = sp->sl_head; | |
2010 | VERIFY(c != NULL); | |
2011 | sp->sl_head = c->mcl_next; | |
2012 | c->mcl_next = NULL; | |
2013 | } | |
3e170ce0 A |
2014 | reinit_supercl = true; |
2015 | } else if (class == MC_BIGCL && super_class != MC_BIGCL && | |
2016 | sp->sl_refcnt == 0 && | |
2017 | m_total(class) >= (m_minlimit(class) + NBCLPG) && | |
2018 | m_total(super_class) < m_maxlimit(super_class)) { | |
2019 | int i = NBCLPG; | |
2020 | ||
2021 | VERIFY(super_class == MC_16KCL); | |
2022 | m_total(MC_BIGCL) -= NBCLPG; | |
2023 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
2024 | m_infree(MC_BIGCL) -= NBCLPG; | |
6d2010ae | 2025 | |
3e170ce0 A |
2026 | while (i--) { |
2027 | union mbigcluster *bc = sp->sl_head; | |
2028 | VERIFY(bc != NULL); | |
2029 | sp->sl_head = bc->mbc_next; | |
2030 | bc->mbc_next = NULL; | |
2031 | } | |
2032 | reinit_supercl = true; | |
2033 | } | |
2034 | ||
2035 | if (reinit_supercl) { | |
2036 | VERIFY(sp->sl_head == NULL); | |
2037 | VERIFY(m_total(class) >= m_minlimit(class)); | |
6d2010ae A |
2038 | slab_remove(sp, class); |
2039 | ||
3e170ce0 A |
2040 | /* Reinitialize it as a cluster for the super class */ |
2041 | m_total(super_class)++; | |
2042 | m_infree(super_class)++; | |
2043 | VERIFY(sp->sl_flags == (SLF_MAPPED | SLF_DETACHED) && | |
2044 | sp->sl_len == PAGE_SIZE && sp->sl_refcnt == 0); | |
6d2010ae | 2045 | |
3e170ce0 A |
2046 | slab_init(sp, super_class, SLF_MAPPED, sp->sl_base, |
2047 | sp->sl_base, PAGE_SIZE, 0, 1); | |
2048 | if (mclverify) | |
6d2010ae | 2049 | mcache_set_pattern(MCACHE_FREE_PATTERN, |
3e170ce0 A |
2050 | (caddr_t)sp->sl_base, sp->sl_len); |
2051 | ((mcache_obj_t *)(sp->sl_base))->obj_next = NULL; | |
2052 | ||
2053 | if (super_class == MC_BIGCL) { | |
2054 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
2055 | mbstat.m_bigclfree = m_infree(MC_BIGCL) + | |
2056 | m_infree(MC_MBUF_BIGCL); | |
6d2010ae | 2057 | } |
2d21ac55 A |
2058 | |
2059 | VERIFY(slab_is_detached(sp)); | |
3e170ce0 A |
2060 | VERIFY(m_total(super_class) <= m_maxlimit(super_class)); |
2061 | ||
2d21ac55 | 2062 | /* And finally switch class */ |
3e170ce0 | 2063 | class = super_class; |
2d21ac55 A |
2064 | } |
2065 | ||
2066 | /* Reinsert the slab into the class's slab list */ | |
2067 | if (slab_is_detached(sp)) | |
2068 | slab_insert(sp, class); | |
2069 | } | |
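/*
 * Illustrative, self-contained sketch (hypothetical code, not part of
 * the original source) of the intrusive freelist discipline used by
 * slab_alloc()/slab_free() above: free buffers are chained through an
 * obj_next pointer stored in the buffer itself, so push and pop are a
 * couple of pointer moves with no extra allocation.
 */
struct my_obj {
	struct my_obj *obj_next;
};

static struct my_obj *
freelist_pop(struct my_obj **head)
{
	struct my_obj *buf = *head;

	if (buf != NULL) {
		*head = buf->obj_next;	/* unlink the first element */
		buf->obj_next = NULL;
	}
	return (buf);
}

static void
freelist_push(struct my_obj **head, struct my_obj *buf)
{
	buf->obj_next = *head;		/* link in front of the list */
	*head = buf;
}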
2070 | ||
2071 | /* | |
2072 | * Common allocator for rudimentary objects called by the CPU cache layer | |
2073 | * during an allocation request whenever there is no available element in the | |
2074 | * bucket layer. It returns one or more elements from the appropriate global | |
2075 | * freelist. If the freelist is empty, it will attempt to populate it and | |
2076 | * retry the allocation. | |
2077 | */ | |
2078 | static unsigned int | |
2079 | mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait) | |
2080 | { | |
2081 | mbuf_class_t class = (mbuf_class_t)arg; | |
2082 | unsigned int need = num; | |
2083 | mcache_obj_t **list = *plist; | |
2084 | ||
2085 | ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class)); | |
2086 | ASSERT(need > 0); | |
2087 | ||
2088 | lck_mtx_lock(mbuf_mlock); | |
2089 | ||
2090 | for (;;) { | |
2091 | if ((*list = slab_alloc(class, wait)) != NULL) { | |
2092 | (*list)->obj_next = NULL; | |
2093 | list = *plist = &(*list)->obj_next; | |
2094 | ||
2095 | if (--need == 0) { | |
2096 | /* | |
2097 | * If the number of elements in the freelist has | |
2098 | * dropped below the low watermark, asynchronously | |
2099 | * populate the freelist now rather than doing | |
2100 | * it later when we run out of elements. | |
2101 | */ | |
2102 | if (!mbuf_cached_above(class, wait) && | |
3e170ce0 | 2103 | m_infree(class) < (m_total(class) >> 5)) { |
2d21ac55 A |
2104 | (void) freelist_populate(class, 1, |
2105 | M_DONTWAIT); | |
2106 | } | |
2107 | break; | |
2108 | } | |
2109 | } else { | |
2110 | VERIFY(m_infree(class) == 0 || class == MC_CL); | |
2111 | ||
2112 | (void) freelist_populate(class, 1, | |
2113 | (wait & MCR_NOSLEEP) ? M_DONTWAIT : M_WAIT); | |
2114 | ||
2115 | if (m_infree(class) > 0) | |
2116 | continue; | |
2117 | ||
2118 | /* Check if there's anything at the cache layer */ | |
2119 | if (mbuf_cached_above(class, wait)) | |
2120 | break; | |
2121 | ||
6d2010ae A |
2122 | /* watchdog checkpoint */ |
2123 | mbuf_watchdog(); | |
2124 | ||
2d21ac55 A |
2125 | /* We have nothing and cannot block; give up */ |
2126 | if (wait & MCR_NOSLEEP) { | |
2127 | if (!(wait & MCR_TRYHARD)) { | |
2128 | m_fail_cnt(class)++; | |
2129 | mbstat.m_drops++; | |
2130 | break; | |
2131 | } | |
2132 | } | |
2133 | ||
2134 | /* | |
2135 | * If the freelist is still empty and the caller is | |
2136 | * willing to be blocked, sleep on the wait channel | |
2137 | * until an element is available. Otherwise, if | |
2138 | * MCR_TRYHARD is set, do our best to satisfy the | |
2139 | * request without having to go to sleep. | |
2140 | */ | |
2141 | if (mbuf_worker_ready && | |
2142 | mbuf_sleep(class, need, wait)) | |
2143 | break; | |
2144 | ||
2145 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2146 | } | |
2147 | } | |
2148 | ||
2149 | m_alloc_cnt(class) += num - need; | |
2150 | lck_mtx_unlock(mbuf_mlock); | |
2151 | ||
2152 | return (num - need); | |
2153 | } | |
2154 | ||
2155 | /* | |
2156 | * Common de-allocator for rudimentary objects called by the CPU cache | |
2157 | * layer when one or more elements need to be returned to the appropriate | |
2158 | * global freelist. | |
2159 | */ | |
2160 | static void | |
2161 | mbuf_slab_free(void *arg, mcache_obj_t *list, __unused int purged) | |
2162 | { | |
2163 | mbuf_class_t class = (mbuf_class_t)arg; | |
2164 | mcache_obj_t *nlist; | |
2165 | unsigned int num = 0; | |
2166 | int w; | |
2167 | ||
2168 | ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class)); | |
2169 | ||
2170 | lck_mtx_lock(mbuf_mlock); | |
2171 | ||
2172 | for (;;) { | |
2173 | nlist = list->obj_next; | |
2174 | list->obj_next = NULL; | |
2175 | slab_free(class, list); | |
2176 | ++num; | |
2177 | if ((list = nlist) == NULL) | |
2178 | break; | |
2179 | } | |
2180 | m_free_cnt(class) += num; | |
2181 | ||
2182 | if ((w = mb_waiters) > 0) | |
2183 | mb_waiters = 0; | |
2184 | ||
2185 | lck_mtx_unlock(mbuf_mlock); | |
2186 | ||
2187 | if (w != 0) | |
2188 | wakeup(mb_waitchan); | |
2189 | } | |
2190 | ||
2191 | /* | |
2192 | * Common auditor for rudimentary objects called by the CPU cache layer | |
2193 | * during an allocation or free request. For the former, this is called | |
2194 | * after the objects are obtained from either the bucket or slab layer | |
2195 | * and before they are returned to the caller. For the latter, this is | |
2196 | * called immediately during free and before placing the objects into | |
2197 | * the bucket or slab layer. | |
2198 | */ | |
2199 | static void | |
2200 | mbuf_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) | |
2201 | { | |
2202 | mbuf_class_t class = (mbuf_class_t)arg; | |
2203 | mcache_audit_t *mca; | |
2204 | ||
2205 | ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class)); | |
2206 | ||
2207 | while (list != NULL) { | |
2208 | lck_mtx_lock(mbuf_mlock); | |
2209 | mca = mcl_audit_buf2mca(class, list); | |
2210 | ||
2211 | /* Do the sanity checks */ | |
2212 | if (class == MC_MBUF) { | |
2213 | mcl_audit_mbuf(mca, list, FALSE, alloc); | |
2214 | ASSERT(mca->mca_uflags & MB_SCVALID); | |
2215 | } else { | |
2216 | mcl_audit_cluster(mca, list, m_maxsize(class), | |
2217 | alloc, TRUE); | |
2218 | ASSERT(!(mca->mca_uflags & MB_SCVALID)); | |
2219 | } | |
2220 | /* Record this transaction */ | |
6d2010ae | 2221 | if (mcltrace) |
39236c6e | 2222 | mcache_buffer_log(mca, list, m_cache(class), &mb_start); |
6d2010ae | 2223 | |
2d21ac55 A |
2224 | if (alloc) |
2225 | mca->mca_uflags |= MB_INUSE; | |
2226 | else | |
2227 | mca->mca_uflags &= ~MB_INUSE; | |
2228 | /* Unpair the object (unconditionally) */ | |
2229 | mca->mca_uptr = NULL; | |
2230 | lck_mtx_unlock(mbuf_mlock); | |
2231 | ||
2232 | list = list->obj_next; | |
2233 | } | |
2234 | } | |
2235 | ||
2236 | /* | |
2237 | * Common notify routine for all caches. It is called by mcache when | |
2238 | * one or more objects get freed. We use this indication to trigger | |
2239 | * the wakeup of any sleeping threads so that they can retry their | |
2240 | * allocation requests. | |
2241 | */ | |
2242 | static void | |
2243 | mbuf_slab_notify(void *arg, u_int32_t reason) | |
2244 | { | |
2245 | mbuf_class_t class = (mbuf_class_t)arg; | |
2246 | int w; | |
2247 | ||
2248 | ASSERT(MBUF_CLASS_VALID(class)); | |
2249 | ||
2250 | if (reason != MCN_RETRYALLOC) | |
2251 | return; | |
2252 | ||
2253 | lck_mtx_lock(mbuf_mlock); | |
2254 | if ((w = mb_waiters) > 0) { | |
2255 | m_notified(class)++; | |
2256 | mb_waiters = 0; | |
2257 | } | |
2258 | lck_mtx_unlock(mbuf_mlock); | |
2259 | ||
2260 | if (w != 0) | |
2261 | wakeup(mb_waitchan); | |
2262 | } | |
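/*
 * Illustrative, self-contained sketch (a hypothetical user-space
 * analogue, not part of the original source) of the waiter hand-off
 * used by mbuf_slab_free() and mbuf_slab_notify() above: the waiter
 * count is snapshotted and cleared under the lock, and the broadcast
 * is issued only after the lock is dropped, so woken threads can
 * immediately retake it and retry their allocation.
 */
#include <pthread.h>

static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cv = PTHREAD_COND_INITIALIZER;
static int q_waiters;

static void
q_notify(void)
{
	int w;

	pthread_mutex_lock(&q_mtx);
	if ((w = q_waiters) > 0)
		q_waiters = 0;	/* every sleeper re-checks and retries */
	pthread_mutex_unlock(&q_mtx);

	if (w != 0)
		pthread_cond_broadcast(&q_cv);
}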
2263 | ||
2264 | /* | |
2265 | * Obtain object(s) from the composite class's freelist. | |
2266 | */ | |
2267 | static unsigned int | |
2268 | cslab_alloc(mbuf_class_t class, mcache_obj_t ***plist, unsigned int num) | |
2269 | { | |
2270 | unsigned int need = num; | |
2271 | mcl_slab_t *sp, *clsp, *nsp; | |
2272 | struct mbuf *m; | |
2273 | mcache_obj_t **list = *plist; | |
2274 | void *cl; | |
2275 | ||
2276 | VERIFY(need > 0); | |
2277 | VERIFY(class != MC_MBUF_16KCL || njcl > 0); | |
2278 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2279 | ||
2280 | /* Get what we can from the freelist */ | |
2281 | while ((*list = m_cobjlist(class)) != NULL) { | |
2282 | MRANGE(*list); | |
2283 | ||
2284 | m = (struct mbuf *)*list; | |
2285 | sp = slab_get(m); | |
2286 | cl = m->m_ext.ext_buf; | |
2287 | clsp = slab_get(cl); | |
2288 | VERIFY(m->m_flags == M_EXT && cl != NULL); | |
2289 | VERIFY(MEXT_RFA(m) != NULL && MBUF_IS_COMPOSITE(m)); | |
6d2010ae A |
2290 | |
2291 | if (class == MC_MBUF_CL) { | |
2292 | VERIFY(clsp->sl_refcnt >= 1 && | |
3e170ce0 | 2293 | clsp->sl_refcnt <= NCLPG); |
6d2010ae | 2294 | } else { |
3e170ce0 A |
2295 | VERIFY(clsp->sl_refcnt >= 1 && |
2296 | clsp->sl_refcnt <= NBCLPG); | |
6d2010ae A |
2297 | } |
2298 | ||
2299 | if (class == MC_MBUF_16KCL) { | |
2d21ac55 | 2300 | int k; |
6d2010ae | 2301 | for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
2302 | nsp = nsp->sl_next; |
2303 | /* Next slab must already be present */ | |
2304 | VERIFY(nsp != NULL); | |
2305 | VERIFY(nsp->sl_refcnt == 1); | |
2306 | } | |
2307 | } | |
2308 | ||
2309 | if ((m_cobjlist(class) = (*list)->obj_next) != NULL && | |
2310 | !MBUF_IN_MAP(m_cobjlist(class))) { | |
2311 | slab_nextptr_panic(sp, m_cobjlist(class)); | |
2312 | /* NOTREACHED */ | |
2313 | } | |
2314 | (*list)->obj_next = NULL; | |
2315 | list = *plist = &(*list)->obj_next; | |
2316 | ||
2317 | if (--need == 0) | |
2318 | break; | |
2319 | } | |
2320 | m_infree(class) -= (num - need); | |
2321 | ||
2322 | return (num - need); | |
2323 | } | |
2324 | ||
2325 | /* | |
2326 | * Place object(s) back into a composite class's freelist. | |
2327 | */ | |
2328 | static unsigned int | |
2329 | cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged) | |
2330 | { | |
2331 | mcache_obj_t *o, *tail; | |
2332 | unsigned int num = 0; | |
2333 | struct mbuf *m, *ms; | |
2334 | mcache_audit_t *mca = NULL; | |
2335 | mcache_obj_t *ref_list = NULL; | |
2336 | mcl_slab_t *clsp, *nsp; | |
2337 | void *cl; | |
6d2010ae | 2338 | mbuf_class_t cl_class; |
2d21ac55 A |
2339 | |
2340 | ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class)); | |
2341 | VERIFY(class != MC_MBUF_16KCL || njcl > 0); | |
2342 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2343 | ||
6d2010ae A |
2344 | if (class == MC_MBUF_CL) { |
2345 | cl_class = MC_CL; | |
2346 | } else if (class == MC_MBUF_BIGCL) { | |
2347 | cl_class = MC_BIGCL; | |
2348 | } else { | |
2349 | VERIFY(class == MC_MBUF_16KCL); | |
2350 | cl_class = MC_16KCL; | |
2351 | } | |
2352 | ||
2d21ac55 A |
2353 | o = tail = list; |
2354 | ||
2355 | while ((m = ms = (struct mbuf *)o) != NULL) { | |
2356 | mcache_obj_t *rfa, *nexto = o->obj_next; | |
2357 | ||
2358 | /* Do the mbuf sanity checks */ | |
2359 | if (mclaudit != NULL) { | |
2360 | mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); | |
6d2010ae A |
2361 | if (mclverify) { |
2362 | mcache_audit_free_verify(mca, m, 0, | |
2363 | m_maxsize(MC_MBUF)); | |
2364 | } | |
39236c6e | 2365 | ms = MCA_SAVED_MBUF_PTR(mca); |
2d21ac55 A |
2366 | } |
2367 | ||
2368 | /* Do the cluster sanity checks */ | |
2369 | cl = ms->m_ext.ext_buf; | |
2370 | clsp = slab_get(cl); | |
6d2010ae A |
2371 | if (mclverify) { |
2372 | size_t size = m_maxsize(cl_class); | |
2373 | mcache_audit_free_verify(mcl_audit_buf2mca(cl_class, | |
2d21ac55 A |
2374 | (mcache_obj_t *)cl), cl, 0, size); |
2375 | } | |
2376 | VERIFY(ms->m_type == MT_FREE); | |
2377 | VERIFY(ms->m_flags == M_EXT); | |
2378 | VERIFY(MEXT_RFA(ms) != NULL && MBUF_IS_COMPOSITE(ms)); | |
6d2010ae A |
2379 | if (cl_class == MC_CL) { |
2380 | VERIFY(clsp->sl_refcnt >= 1 && | |
3e170ce0 | 2381 | clsp->sl_refcnt <= NCLPG); |
6d2010ae | 2382 | } else { |
39037602 | 2383 | VERIFY(clsp->sl_refcnt >= 1 && |
3e170ce0 | 2384 | clsp->sl_refcnt <= NBCLPG); |
6d2010ae A |
2385 | } |
2386 | if (cl_class == MC_16KCL) { | |
2d21ac55 | 2387 | int k; |
6d2010ae | 2388 | for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
2389 | nsp = nsp->sl_next; |
2390 | /* Next slab must already be present */ | |
2391 | VERIFY(nsp != NULL); | |
2392 | VERIFY(nsp->sl_refcnt == 1); | |
2393 | } | |
2394 | } | |
2395 | ||
2396 | /* | |
2397 | * If we're asked to purge, restore the actual mbuf using | |
2398 | * the contents of the shadow structure (if auditing is enabled) | |
2399 | * and clear the EXTF_COMPOSITE flag from the mbuf, as we are | |
2400 | * about to free it and the attached cluster into their caches. | |
2401 | */ | |
2402 | if (purged) { | |
2403 | /* Restore constructed mbuf fields */ | |
2404 | if (mclaudit != NULL) | |
2405 | mcl_audit_restore_mbuf(m, mca, TRUE); | |
2406 | ||
39037602 | 2407 | MEXT_MINREF(m) = 0; |
2d21ac55 | 2408 | MEXT_REF(m) = 0; |
39037602 | 2409 | MEXT_PREF(m) = 0; |
2d21ac55 | 2410 | MEXT_FLAGS(m) = 0; |
39037602 A |
2411 | MEXT_PRIV(m) = 0; |
2412 | MEXT_PMBUF(m) = NULL; | |
2d21ac55 | 2413 | |
316670eb | 2414 | rfa = (mcache_obj_t *)(void *)MEXT_RFA(m); |
2d21ac55 A |
2415 | rfa->obj_next = ref_list; |
2416 | ref_list = rfa; | |
2417 | MEXT_RFA(m) = NULL; | |
2418 | ||
2419 | m->m_type = MT_FREE; | |
2420 | m->m_flags = m->m_len = 0; | |
2421 | m->m_next = m->m_nextpkt = NULL; | |
2422 | ||
2423 | /* Save mbuf fields and make auditing happy */ | |
2424 | if (mclaudit != NULL) | |
2425 | mcl_audit_mbuf(mca, o, FALSE, FALSE); | |
2426 | ||
2427 | VERIFY(m_total(class) > 0); | |
2428 | m_total(class)--; | |
2429 | ||
2430 | /* Free the mbuf */ | |
2431 | o->obj_next = NULL; | |
2432 | slab_free(MC_MBUF, o); | |
2433 | ||
2434 | /* And free the cluster */ | |
2435 | ((mcache_obj_t *)cl)->obj_next = NULL; | |
2436 | if (class == MC_MBUF_CL) | |
2437 | slab_free(MC_CL, cl); | |
2438 | else if (class == MC_MBUF_BIGCL) | |
2439 | slab_free(MC_BIGCL, cl); | |
2440 | else | |
2441 | slab_free(MC_16KCL, cl); | |
2442 | } | |
2443 | ||
2444 | ++num; | |
2445 | tail = o; | |
2446 | o = nexto; | |
2447 | } | |
2448 | ||
2449 | if (!purged) { | |
2450 | tail->obj_next = m_cobjlist(class); | |
2451 | m_cobjlist(class) = list; | |
2452 | m_infree(class) += num; | |
2453 | } else if (ref_list != NULL) { | |
2454 | mcache_free_ext(ref_cache, ref_list); | |
2455 | } | |
2456 | ||
2457 | return (num); | |
2458 | } | |
2459 | ||
2460 | /* | |
2461 | * Common allocator for composite objects called by the CPU cache layer | |
2462 | * during an allocation request whenever there is no available element in | |
2463 | * the bucket layer. It returns one or more composite elements from the | |
2464 | * appropriate global freelist. If the freelist is empty, it will attempt | |
2465 | * to obtain the rudimentary objects from their caches and construct them | |
2466 | * into composite mbuf + cluster objects. | |
2467 | */ | |
2468 | static unsigned int | |
2469 | mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed, | |
2470 | int wait) | |
2471 | { | |
2472 | mbuf_class_t class = (mbuf_class_t)arg; | |
6d2010ae | 2473 | mbuf_class_t cl_class = 0; |
2d21ac55 A |
2474 | unsigned int num = 0, cnum = 0, want = needed; |
2475 | mcache_obj_t *ref_list = NULL; | |
2476 | mcache_obj_t *mp_list = NULL; | |
2477 | mcache_obj_t *clp_list = NULL; | |
2478 | mcache_obj_t **list; | |
2479 | struct ext_ref *rfa; | |
2480 | struct mbuf *m; | |
2481 | void *cl; | |
2482 | ||
2483 | ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class)); | |
2484 | ASSERT(needed > 0); | |
2485 | ||
2486 | VERIFY(class != MC_MBUF_16KCL || njcl > 0); | |
2487 | ||
2488 | /* There should not be any slab for this class */ | |
2489 | VERIFY(m_slab_cnt(class) == 0 && | |
2490 | m_slablist(class).tqh_first == NULL && | |
2491 | m_slablist(class).tqh_last == NULL); | |
2492 | ||
2493 | lck_mtx_lock(mbuf_mlock); | |
2494 | ||
2495 | /* Try using the freelist first */ | |
2496 | num = cslab_alloc(class, plist, needed); | |
2497 | list = *plist; | |
2498 | if (num == needed) { | |
2499 | m_alloc_cnt(class) += num; | |
2500 | lck_mtx_unlock(mbuf_mlock); | |
2501 | return (needed); | |
2502 | } | |
2503 | ||
2504 | lck_mtx_unlock(mbuf_mlock); | |
2505 | ||
2506 | /* | |
2507 | * We could not satisfy the request using the freelist alone; | |
2508 | * allocate from the appropriate rudimentary caches and use | |
2509 | * whatever we can get to construct the composite objects. | |
2510 | */ | |
2511 | needed -= num; | |
2512 | ||
2513 | /* | |
2514 | * Mark these allocation requests as coming from a composite cache. | |
2515 | * Also, if the caller is willing to be blocked, mark the request | |
2516 | * with MCR_FAILOK such that we don't end up sleeping at the mbuf | |
2517 | * slab layer waiting for the individual object when one or more | |
2518 | * of the already-constructed composite objects are available. | |
2519 | */ | |
2520 | wait |= MCR_COMP; | |
2521 | if (!(wait & MCR_NOSLEEP)) | |
2522 | wait |= MCR_FAILOK; | |
2523 | ||
6d2010ae | 2524 | /* allocate mbufs */ |
2d21ac55 A |
2525 | needed = mcache_alloc_ext(m_cache(MC_MBUF), &mp_list, needed, wait); |
2526 | if (needed == 0) { | |
2527 | ASSERT(mp_list == NULL); | |
2528 | goto fail; | |
2529 | } | |
6d2010ae A |
2530 | |
2531 | /* allocate clusters */ | |
2532 | if (class == MC_MBUF_CL) { | |
2533 | cl_class = MC_CL; | |
2534 | } else if (class == MC_MBUF_BIGCL) { | |
2535 | cl_class = MC_BIGCL; | |
2536 | } else { | |
2537 | VERIFY(class == MC_MBUF_16KCL); | |
2538 | cl_class = MC_16KCL; | |
2539 | } | |
2540 | needed = mcache_alloc_ext(m_cache(cl_class), &clp_list, needed, wait); | |
2d21ac55 A |
2541 | if (needed == 0) { |
2542 | ASSERT(clp_list == NULL); | |
2543 | goto fail; | |
2544 | } | |
6d2010ae | 2545 | |
2d21ac55 A |
2546 | needed = mcache_alloc_ext(ref_cache, &ref_list, needed, wait); |
2547 | if (needed == 0) { | |
2548 | ASSERT(ref_list == NULL); | |
2549 | goto fail; | |
2550 | } | |
2551 | ||
2552 | /* | |
2553 | * By this time "needed" is MIN(mbuf, cluster, ref). Any | |
2554 | * leftovers will get freed accordingly before we return to the caller. | |
2555 | */ | |
2556 | for (cnum = 0; cnum < needed; cnum++) { | |
2557 | struct mbuf *ms; | |
2558 | ||
2559 | m = ms = (struct mbuf *)mp_list; | |
2560 | mp_list = mp_list->obj_next; | |
2561 | ||
2562 | cl = clp_list; | |
2563 | clp_list = clp_list->obj_next; | |
2564 | ((mcache_obj_t *)cl)->obj_next = NULL; | |
2565 | ||
2566 | rfa = (struct ext_ref *)ref_list; | |
2567 | ref_list = ref_list->obj_next; | |
316670eb | 2568 | ((mcache_obj_t *)(void *)rfa)->obj_next = NULL; |
2d21ac55 A |
2569 | |
2570 | /* | |
2571 | * If auditing is enabled, construct the shadow mbuf | |
2572 | * in the audit structure instead of in the actual one. | |
2573 | * mbuf_cslab_audit() will take care of restoring the | |
2574 | * contents after the integrity check. | |
2575 | */ | |
2576 | if (mclaudit != NULL) { | |
2577 | mcache_audit_t *mca, *cl_mca; | |
2d21ac55 A |
2578 | |
2579 | lck_mtx_lock(mbuf_mlock); | |
2580 | mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); | |
39236c6e | 2581 | ms = MCA_SAVED_MBUF_PTR(mca); |
3e170ce0 A |
2582 | cl_mca = mcl_audit_buf2mca(cl_class, |
2583 | (mcache_obj_t *)cl); | |
2d21ac55 A |
2584 | |
2585 | /* | |
2586 | * Pair them up. Note that this is done at the time | |
2587 | * the mbuf+cluster objects are constructed. This | |
2588 | * information should be treated as a "best effort" | |
2589 | * debugging hint since more than one mbuf can refer | |
2590 | * to a cluster. In that case, the cluster might not | |
2591 | * be freed along with the mbuf it was paired with. | |
2592 | */ | |
2593 | mca->mca_uptr = cl_mca; | |
2594 | cl_mca->mca_uptr = mca; | |
2595 | ||
2596 | ASSERT(mca->mca_uflags & MB_SCVALID); | |
2597 | ASSERT(!(cl_mca->mca_uflags & MB_SCVALID)); | |
2598 | lck_mtx_unlock(mbuf_mlock); | |
2599 | ||
2600 | /* Technically, they are in the freelist */ | |
6d2010ae A |
2601 | if (mclverify) { |
2602 | size_t size; | |
2603 | ||
2604 | mcache_set_pattern(MCACHE_FREE_PATTERN, m, | |
2605 | m_maxsize(MC_MBUF)); | |
2606 | ||
2607 | if (class == MC_MBUF_CL) | |
2608 | size = m_maxsize(MC_CL); | |
2609 | else if (class == MC_MBUF_BIGCL) | |
2610 | size = m_maxsize(MC_BIGCL); | |
2611 | else | |
2612 | size = m_maxsize(MC_16KCL); | |
2613 | ||
2614 | mcache_set_pattern(MCACHE_FREE_PATTERN, cl, | |
2615 | size); | |
2616 | } | |
2d21ac55 A |
2617 | } |
2618 | ||
2619 | MBUF_INIT(ms, 0, MT_FREE); | |
2620 | if (class == MC_MBUF_16KCL) { | |
2621 | MBUF_16KCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE); | |
2622 | } else if (class == MC_MBUF_BIGCL) { | |
2623 | MBUF_BIGCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE); | |
2624 | } else { | |
2625 | MBUF_CL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE); | |
2626 | } | |
2627 | VERIFY(ms->m_flags == M_EXT); | |
2628 | VERIFY(MEXT_RFA(ms) != NULL && MBUF_IS_COMPOSITE(ms)); | |
2629 | ||
2630 | *list = (mcache_obj_t *)m; | |
2631 | (*list)->obj_next = NULL; | |
2632 | list = *plist = &(*list)->obj_next; | |
2633 | } | |
2634 | ||
2635 | fail: | |
2636 | /* | |
2637 | * Free up what's left of the above. | |
2638 | */ | |
2639 | if (mp_list != NULL) | |
2640 | mcache_free_ext(m_cache(MC_MBUF), mp_list); | |
2641 | if (clp_list != NULL) | |
6d2010ae | 2642 | mcache_free_ext(m_cache(cl_class), clp_list); |
2d21ac55 A |
2643 | if (ref_list != NULL) |
2644 | mcache_free_ext(ref_cache, ref_list); | |
2645 | ||
2646 | lck_mtx_lock(mbuf_mlock); | |
2647 | if (num > 0 || cnum > 0) { | |
2648 | m_total(class) += cnum; | |
2649 | VERIFY(m_total(class) <= m_maxlimit(class)); | |
2650 | m_alloc_cnt(class) += num + cnum; | |
2651 | } | |
2652 | if ((num + cnum) < want) | |
2653 | m_fail_cnt(class) += (want - (num + cnum)); | |
2654 | lck_mtx_unlock(mbuf_mlock); | |
2655 | ||
2656 | return (num + cnum); | |
2657 | } | |
2658 | ||
2659 | /* | |
2660 | * Common de-allocator for composite objects called by the CPU cache | |
2661 | * layer when one or more elements need to be returned to the appropriate | |
2662 | * global freelist. | |
2663 | */ | |
2664 | static void | |
2665 | mbuf_cslab_free(void *arg, mcache_obj_t *list, int purged) | |
2666 | { | |
2667 | mbuf_class_t class = (mbuf_class_t)arg; | |
2668 | unsigned int num; | |
2669 | int w; | |
2670 | ||
2671 | ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class)); | |
2672 | ||
2673 | lck_mtx_lock(mbuf_mlock); | |
2674 | ||
2675 | num = cslab_free(class, list, purged); | |
2676 | m_free_cnt(class) += num; | |
2677 | ||
2678 | if ((w = mb_waiters) > 0) | |
2679 | mb_waiters = 0; | |
2680 | ||
2681 | lck_mtx_unlock(mbuf_mlock); | |
2682 | ||
2683 | if (w != 0) | |
2684 | wakeup(mb_waitchan); | |
2685 | } | |
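The free path above snapshots mb_waiters under mbuf_mlock, clears it, and only calls wakeup() after the lock is dropped, so woken threads do not immediately collide with the lock holder. A minimal user-space sketch of the same pattern, using pthreads in place of lck_mtx/wakeup (all names below are illustrative, not from this file):

    #include <pthread.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t pool_waitchan = PTHREAD_COND_INITIALIZER;
    static int pool_waiters = 0;

    static void
    pool_free_and_wake(void)
    {
            int w;

            pthread_mutex_lock(&pool_lock);
            /* ... return elements to the freelist here ... */
            if ((w = pool_waiters) > 0)
                    pool_waiters = 0;
            pthread_mutex_unlock(&pool_lock);

            /* Wake sleepers only after dropping the lock, mirroring
             * the wakeup(mb_waitchan) call above. */
            if (w != 0)
                    pthread_cond_broadcast(&pool_waitchan);
    }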
2686 | ||
2687 | /* | |
2688 | * Common auditor for composite objects called by the CPU cache layer | |
2689 | * during an allocation or free request. For the former, this is called | |
2690 | * after the objects are obtained from either the bucket or slab layer | |
2691 | * and before they are returned to the caller. For the latter, this is | |
2692 | * called immediately during free and before placing the objects into | |
2693 | * the bucket or slab layer. | |
2694 | */ | |
2695 | static void | |
2696 | mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) | |
2697 | { | |
3e170ce0 | 2698 | mbuf_class_t class = (mbuf_class_t)arg, cl_class; |
2d21ac55 A |
2699 | mcache_audit_t *mca; |
2700 | struct mbuf *m, *ms; | |
2701 | mcl_slab_t *clsp, *nsp; | |
3e170ce0 | 2702 | size_t cl_size; |
2d21ac55 A |
2703 | void *cl; |
2704 | ||
2705 | ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class)); | |
3e170ce0 A |
2706 | if (class == MC_MBUF_CL) |
2707 | cl_class = MC_CL; | |
2708 | else if (class == MC_MBUF_BIGCL) | |
2709 | cl_class = MC_BIGCL; | |
2710 | else | |
2711 | cl_class = MC_16KCL; | |
2712 | cl_size = m_maxsize(cl_class); | |
2d21ac55 A |
2713 | |
2714 | while ((m = ms = (struct mbuf *)list) != NULL) { | |
2715 | lck_mtx_lock(mbuf_mlock); | |
2716 | /* Do the mbuf sanity checks and record its transaction */ | |
2717 | mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); | |
2718 | mcl_audit_mbuf(mca, m, TRUE, alloc); | |
6d2010ae | 2719 | if (mcltrace) |
39236c6e | 2720 | mcache_buffer_log(mca, m, m_cache(class), &mb_start); |
6d2010ae | 2721 | |
2d21ac55 A |
2722 | if (alloc) |
2723 | mca->mca_uflags |= MB_COMP_INUSE; | |
2724 | else | |
2725 | mca->mca_uflags &= ~MB_COMP_INUSE; | |
2726 | ||
2727 | /* | |
2728 | * Use the shadow mbuf in the audit structure if we are | |
2729 | * freeing, since the contents of the actual mbuf have been | |
2730 | * pattern-filled by the above call to mcl_audit_mbuf(). | |
2731 | */ | |
6d2010ae | 2732 | if (!alloc && mclverify) |
39236c6e | 2733 | ms = MCA_SAVED_MBUF_PTR(mca); |
2d21ac55 A |
2734 | |
2735 | /* Do the cluster sanity checks and record its transaction */ | |
2736 | cl = ms->m_ext.ext_buf; | |
2737 | clsp = slab_get(cl); | |
2738 | VERIFY(ms->m_flags == M_EXT && cl != NULL); | |
2739 | VERIFY(MEXT_RFA(ms) != NULL && MBUF_IS_COMPOSITE(ms)); | |
6d2010ae A |
2740 | if (class == MC_MBUF_CL) |
2741 | VERIFY(clsp->sl_refcnt >= 1 && | |
3e170ce0 | 2742 | clsp->sl_refcnt <= NCLPG); |
6d2010ae | 2743 | else |
3e170ce0 A |
2744 | VERIFY(clsp->sl_refcnt >= 1 && |
2745 | clsp->sl_refcnt <= NBCLPG); | |
6d2010ae A |
2746 | |
2747 | if (class == MC_MBUF_16KCL) { | |
2d21ac55 | 2748 | int k; |
6d2010ae | 2749 | for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
2750 | nsp = nsp->sl_next; |
2751 | /* Next slab must already be present */ | |
2752 | VERIFY(nsp != NULL); | |
2753 | VERIFY(nsp->sl_refcnt == 1); | |
2754 | } | |
2755 | } | |
2756 | ||
3e170ce0 A |
2757 | |
2758 | mca = mcl_audit_buf2mca(cl_class, cl); | |
2759 | mcl_audit_cluster(mca, cl, cl_size, alloc, FALSE); | |
6d2010ae | 2760 | if (mcltrace) |
39236c6e | 2761 | mcache_buffer_log(mca, cl, m_cache(class), &mb_start); |
6d2010ae | 2762 | |
2d21ac55 A |
2763 | if (alloc) |
2764 | mca->mca_uflags |= MB_COMP_INUSE; | |
2765 | else | |
2766 | mca->mca_uflags &= ~MB_COMP_INUSE; | |
2767 | lck_mtx_unlock(mbuf_mlock); | |
2768 | ||
2769 | list = list->obj_next; | |
2770 | } | |
2771 | } | |
2772 | ||
2773 | /* | |
2774 | * Allocate some number of mbuf clusters and place on cluster freelist. | |
2775 | */ | |
2776 | static int | |
2777 | m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize) | |
2778 | { | |
3e170ce0 | 2779 | int i, count = 0; |
2d21ac55 | 2780 | vm_size_t size = 0; |
3e170ce0 | 2781 | int numpages = 0, large_buffer; |
2d21ac55 A |
2782 | vm_offset_t page = 0; |
2783 | mcache_audit_t *mca_list = NULL; | |
2784 | mcache_obj_t *con_list = NULL; | |
2785 | mcl_slab_t *sp; | |
3e170ce0 | 2786 | mbuf_class_t class; |
2d21ac55 | 2787 | |
3e170ce0 A |
2788 | /* Set if a buffer allocation needs allocation of multiple pages */ |
2789 | large_buffer = ((bufsize == m_maxsize(MC_16KCL)) && | |
2790 | PAGE_SIZE < M16KCLBYTES); | |
6d2010ae A |
2791 | VERIFY(bufsize == m_maxsize(MC_BIGCL) || |
2792 | bufsize == m_maxsize(MC_16KCL)); | |
2d21ac55 | 2793 | |
3e170ce0 A |
2794 | VERIFY((bufsize == PAGE_SIZE) || |
2795 | (bufsize > PAGE_SIZE && bufsize == m_maxsize(MC_16KCL))); | |
2796 | ||
2797 | if (bufsize == m_size(MC_BIGCL)) | |
2798 | class = MC_BIGCL; | |
2799 | else | |
2800 | class = MC_16KCL; | |
2801 | ||
2d21ac55 A |
2802 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); |
2803 | ||
2804 | /* | |
2805 | * Multiple threads may attempt to populate the cluster map one | |
2806 | * after another. Since we drop the lock below prior to acquiring | |
2807 | * the physical page(s), our view of the cluster map may no longer | |
2808 | * be accurate, and we could end up over-committing the pages beyond | |
2809 | * the maximum allowed for each class. To prevent this, the entire | |
2810 | * operation (including the page mapping) is serialized. | |
2811 | */ | |
2812 | while (mb_clalloc_busy) { | |
2813 | mb_clalloc_waiters++; | |
2814 | (void) msleep(mb_clalloc_waitchan, mbuf_mlock, | |
2815 | (PZERO-1), "m_clalloc", NULL); | |
2816 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2817 | } | |
2818 | ||
2819 | /* We are busy now; tell everyone else to go away */ | |
2820 | mb_clalloc_busy = TRUE; | |
2821 | ||
2822 | /* | |
2823 | * Honor the caller's wish to block or not block. We have a way | |
2824 | * to grow the pool asynchronously using the mbuf worker thread. | |
2825 | */ | |
2826 | i = m_howmany(num, bufsize); | |
2827 | if (i == 0 || (wait & M_DONTWAIT)) | |
2828 | goto out; | |
2829 | ||
2830 | lck_mtx_unlock(mbuf_mlock); | |
2831 | ||
b0d623f7 A |
2832 | size = round_page(i * bufsize); |
2833 | page = kmem_mb_alloc(mb_map, size, large_buffer); | |
2834 | ||
2835 | /* | |
6d2010ae | 2836 | * If we did ask for "n" 16KB physically contiguous chunks |
b0d623f7 A |
2837 | * and didn't get them, then try again without this | |
2838 | * restriction. | |
2839 | */ | |
2840 | if (large_buffer && page == 0) | |
2841 | page = kmem_mb_alloc(mb_map, size, 0); | |
2d21ac55 A |
2842 | |
2843 | if (page == 0) { | |
6d2010ae | 2844 | if (bufsize == m_maxsize(MC_BIGCL)) { |
3e170ce0 A |
2845 | /* Try for 1 page if failed */ |
2846 | size = PAGE_SIZE; | |
b0d623f7 | 2847 | page = kmem_mb_alloc(mb_map, size, 0); |
2d21ac55 A |
2848 | } |
2849 | ||
2850 | if (page == 0) { | |
2851 | lck_mtx_lock(mbuf_mlock); | |
2852 | goto out; | |
2853 | } | |
2854 | } | |
2855 | ||
3e170ce0 A |
2856 | VERIFY(IS_P2ALIGNED(page, PAGE_SIZE)); |
2857 | numpages = size / PAGE_SIZE; | |
2d21ac55 A |
2858 | |
2859 | /* If auditing is enabled, allocate the audit structures now */ | |
2860 | if (mclaudit != NULL) { | |
2861 | int needed; | |
2862 | ||
2863 | /* | |
2864 | * Yes, I realize this is a waste of memory for clusters | |
2865 | * that never get transformed into mbufs, as we may end | |
3e170ce0 | 2866 | * up with NMBPG-1 unused audit structures per cluster. |
2d21ac55 A |
2867 | * But doing so tremendously simplifies the allocation |
2868 | * strategy, since at this point we are not holding the | |
6d2010ae | 2869 | * mbuf lock and the caller may safely block.
2d21ac55 | 2870 | */ |
3e170ce0 A |
2871 | if (bufsize == PAGE_SIZE) { |
2872 | needed = numpages * NMBPG; | |
2d21ac55 A |
2873 | |
2874 | i = mcache_alloc_ext(mcl_audit_con_cache, | |
2875 | &con_list, needed, MCR_SLEEP); | |
2876 | ||
2877 | VERIFY(con_list != NULL && i == needed); | |
2d21ac55 | 2878 | } else { |
3e170ce0 A |
2879 | /* |
2880 | * If multiple 4KB pages back a single 16KB cluster, | |
39037602 A |
2881 | * one audit structure per 16KB cluster suffices. | |
2882 | */ | |
6d2010ae | 2883 | needed = numpages / NSLABSP16KB; |
2d21ac55 A |
2884 | } |
2885 | ||
2886 | i = mcache_alloc_ext(mcache_audit_cache, | |
2887 | (mcache_obj_t **)&mca_list, needed, MCR_SLEEP); | |
2888 | ||
2889 | VERIFY(mca_list != NULL && i == needed); | |
2890 | } | |
2891 | ||
2892 | lck_mtx_lock(mbuf_mlock); | |
2893 | ||
3e170ce0 A |
2894 | for (i = 0; i < numpages; i++, page += PAGE_SIZE) { |
2895 | ppnum_t offset = | |
2896 | ((unsigned char *)page - mbutl) >> PAGE_SHIFT; | |
99c3a104 | 2897 | ppnum_t new_page = pmap_find_phys(kernel_pmap, page); |
2d21ac55 A |
2898 | |
2899 | /* | |
3e170ce0 A |
2900 | * If there is a mapper, the appropriate I/O page is
2901 | * returned; zero out the page to discard its past
2902 | * contents and avoid exposing leftover kernel memory.
2d21ac55 | 2903 | */ |
b0d623f7 | 2904 | VERIFY(offset < mcl_pages); |
39236c6e | 2905 | if (mcl_paddr_base != 0) { |
3e170ce0 | 2906 | bzero((void *)(uintptr_t) page, PAGE_SIZE); |
39236c6e A |
2907 | new_page = IOMapperInsertPage(mcl_paddr_base, |
2908 | offset, new_page); | |
99c3a104 | 2909 | } |
39236c6e | 2910 | mcl_paddr[offset] = new_page; |
2d21ac55 A |
2911 | |
2912 | /* Pattern-fill this fresh page */ | |
6d2010ae | 2913 | if (mclverify) { |
2d21ac55 | 2914 | mcache_set_pattern(MCACHE_FREE_PATTERN, |
3e170ce0 | 2915 | (caddr_t)page, PAGE_SIZE); |
6d2010ae | 2916 | } |
3e170ce0 A |
2917 | if (bufsize == PAGE_SIZE) { |
2918 | mcache_obj_t *buf; | |
2d21ac55 | 2919 | /* One for the entire page */ |
3e170ce0 | 2920 | sp = slab_get((void *)page); |
6d2010ae | 2921 | if (mclaudit != NULL) { |
3e170ce0 A |
2922 | mcl_audit_init((void *)page, |
2923 | &mca_list, &con_list, | |
2924 | AUDIT_CONTENTS_SIZE, NMBPG); | |
6d2010ae | 2925 | } |
2d21ac55 | 2926 | VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0); |
3e170ce0 A |
2927 | slab_init(sp, class, SLF_MAPPED, (void *)page, |
2928 | (void *)page, PAGE_SIZE, 0, 1); | |
2929 | buf = (mcache_obj_t *)page; | |
2930 | buf->obj_next = NULL; | |
2d21ac55 | 2931 | |
2d21ac55 | 2932 | /* Insert this slab */ |
3e170ce0 A |
2933 | slab_insert(sp, class); |
2934 | ||
2935 | /* Update stats now since slab_get drops the lock */ | |
2936 | ++m_infree(class); | |
2937 | ++m_total(class); | |
2938 | VERIFY(m_total(class) <= m_maxlimit(class)); | |
2939 | if (class == MC_BIGCL) { | |
2940 | mbstat.m_bigclfree = m_infree(MC_BIGCL) + | |
2941 | m_infree(MC_MBUF_BIGCL); | |
2942 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
2943 | } | |
2944 | ++count; | |
2945 | } else if ((bufsize > PAGE_SIZE) && | |
2946 | (i % NSLABSP16KB) == 0) { | |
2d21ac55 A |
2947 | union m16kcluster *m16kcl = (union m16kcluster *)page; |
2948 | mcl_slab_t *nsp; | |
2949 | int k; | |
39037602 | 2950 | |
2d21ac55 A |
2951 | /* One for the entire 16KB */ |
2952 | sp = slab_get(m16kcl); | |
2953 | if (mclaudit != NULL) | |
2954 | mcl_audit_init(m16kcl, &mca_list, NULL, 0, 1); | |
2955 | ||
2956 | VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0); | |
2957 | slab_init(sp, MC_16KCL, SLF_MAPPED, | |
2958 | m16kcl, m16kcl, bufsize, 0, 1); | |
3e170ce0 | 2959 | m16kcl->m16kcl_next = NULL; |
2d21ac55 | 2960 | |
6d2010ae A |
2961 | /* |
2962 | * 2nd-Nth page's slab is part of the first one, | |
2963 | * where N is NSLABSP16KB. | |
2964 | */ | |
2965 | for (k = 1; k < NSLABSP16KB; k++) { | |
2966 | nsp = slab_get(((union mbigcluster *)page) + k); | |
2d21ac55 A |
2967 | VERIFY(nsp->sl_refcnt == 0 && |
2968 | nsp->sl_flags == 0); | |
2969 | slab_init(nsp, MC_16KCL, | |
2970 | SLF_MAPPED | SLF_PARTIAL, | |
2971 | m16kcl, NULL, 0, 0, 0); | |
2972 | } | |
2d21ac55 A |
2973 | /* Insert this slab */ |
2974 | slab_insert(sp, MC_16KCL); | |
2975 | ||
3e170ce0 A |
2976 | /* Update stats now since slab_get drops the lock */ |
2977 | ++m_infree(MC_16KCL); | |
2978 | ++m_total(MC_16KCL); | |
2d21ac55 | 2979 | VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL)); |
3e170ce0 | 2980 | ++count; |
2d21ac55 A |
2981 | } |
2982 | } | |
2983 | VERIFY(mca_list == NULL && con_list == NULL); | |
2984 | ||
3e170ce0 A |
2985 | if (!mb_peak_newreport && mbuf_report_usage(class)) |
2986 | mb_peak_newreport = TRUE; | |
2987 | ||
2d21ac55 A |
2988 | /* We're done; let others enter */ |
2989 | mb_clalloc_busy = FALSE; | |
2990 | if (mb_clalloc_waiters > 0) { | |
2991 | mb_clalloc_waiters = 0; | |
2992 | wakeup(mb_clalloc_waitchan); | |
2993 | } | |
2994 | ||
3e170ce0 | 2995 | return (count); |
2d21ac55 A |
2996 | out: |
2997 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
2998 | ||
2999 | /* We're done; let others enter */ | |
3000 | mb_clalloc_busy = FALSE; | |
3001 | if (mb_clalloc_waiters > 0) { | |
3002 | mb_clalloc_waiters = 0; | |
3003 | wakeup(mb_clalloc_waitchan); | |
3004 | } | |
3005 | ||
3006 | /* | |
3007 | * When non-blocking we kick a thread if we have to grow the | |
3008 | * pool or if the number of free clusters is less than requested. | |
3009 | */ | |
39037602 A |
3010 | if (i > 0 && mbuf_worker_ready && mbuf_worker_needs_wakeup) { |
3011 | wakeup((caddr_t)&mbuf_worker_needs_wakeup); | |
3012 | mbuf_worker_needs_wakeup = FALSE; | |
3013 | } | |
3e170ce0 | 3014 | if (class == MC_BIGCL) { |
2d21ac55 A |
3015 | if (i > 0) { |
3016 | /* | |
3017 | * Remember total number of 4KB clusters needed | |
3018 | * at this time. | |
3019 | */ | |
3020 | i += m_total(MC_BIGCL); | |
3021 | if (i > mbuf_expand_big) { | |
3022 | mbuf_expand_big = i; | |
2d21ac55 A |
3023 | } |
3024 | } | |
2d21ac55 A |
3025 | if (m_infree(MC_BIGCL) >= num) |
3026 | return (1); | |
3027 | } else { | |
3028 | if (i > 0) { | |
3029 | /* | |
3030 | * Remember total number of 16KB clusters needed | |
3031 | * at this time. | |
3032 | */ | |
3033 | i += m_total(MC_16KCL); | |
3034 | if (i > mbuf_expand_16k) { | |
3035 | mbuf_expand_16k = i; | |
2d21ac55 A |
3036 | } |
3037 | } | |
2d21ac55 A |
3038 | if (m_infree(MC_16KCL) >= num) |
3039 | return (1); | |
3040 | } | |
3041 | return (0); | |
3042 | } | |
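The mb_clalloc_busy/msleep dance near the top of m_clalloc() is a single-owner gate: whoever sets the flag owns the grow path, may drop mbuf_mlock to do the slow page allocation, and wakes the queued waiters when done. A hedged user-space analog using a condition variable (names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t map_waitchan = PTHREAD_COND_INITIALIZER;
    static bool map_busy = false;

    static void
    grow_map(void)
    {
            pthread_mutex_lock(&map_lock);
            while (map_busy)                /* analog of the msleep() loop */
                    pthread_cond_wait(&map_waitchan, &map_lock);
            map_busy = true;                /* we own the grow path now */
            pthread_mutex_unlock(&map_lock);

            /* ... allocate and map pages without holding the lock ... */

            pthread_mutex_lock(&map_lock);
            map_busy = false;               /* we're done; let others enter */
            pthread_cond_broadcast(&map_waitchan);
            pthread_mutex_unlock(&map_lock);
    }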
3043 | ||
3044 | /* | |
3045 | * Populate the global freelist of the corresponding buffer class. | |
3046 | */ | |
3047 | static int | |
3048 | freelist_populate(mbuf_class_t class, unsigned int num, int wait) | |
3049 | { | |
3050 | mcache_obj_t *o = NULL; | |
6d2010ae | 3051 | int i, numpages = 0, count; |
3e170ce0 | 3052 | mbuf_class_t super_class; |
2d21ac55 A |
3053 | |
3054 | VERIFY(class == MC_MBUF || class == MC_CL || class == MC_BIGCL || | |
3055 | class == MC_16KCL); | |
3056 | ||
2d21ac55 A |
3057 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); |
3058 | ||
3e170ce0 A |
3059 | VERIFY(PAGE_SIZE == m_maxsize(MC_BIGCL) || |
3060 | PAGE_SIZE == m_maxsize(MC_16KCL)); | |
2d21ac55 | 3061 | |
3e170ce0 A |
3062 | if (m_maxsize(class) >= PAGE_SIZE) |
3063 | return(m_clalloc(num, wait, m_maxsize(class)) != 0); | |
2d21ac55 | 3064 | |
3e170ce0 A |
3065 | /* |
3066 | * The rest of the function allocates pages and slices
3067 | * them up into objects of the right size.
3068 | */ | |
2d21ac55 | 3069 | |
3e170ce0 A |
3070 | numpages = (num * m_size(class) + PAGE_SIZE - 1) / PAGE_SIZE; |
3071 | ||
3072 | /* Currently assume that pages are 4K or 16K */ | |
3073 | if (PAGE_SIZE == m_maxsize(MC_BIGCL)) | |
3074 | super_class = MC_BIGCL; | |
3075 | else | |
3076 | super_class = MC_16KCL; | |
2d21ac55 | 3077 | |
3e170ce0 A |
3078 | i = m_clalloc(numpages, wait, m_maxsize(super_class)); |
3079 | ||
3080 | /* Respect the minimum limit of super class */ | |
3081 | if (m_total(super_class) == m_maxlimit(super_class) && | |
3082 | m_infree(super_class) <= m_minlimit(super_class)) | |
3083 | if (wait & MCR_COMP) | |
3084 | return (0); | |
6d2010ae A |
3085 | |
3086 | /* how many objects will we cut the page into? */ | |
3e170ce0 | 3087 | int numobj = PAGE_SIZE / m_maxsize(class); |
6d2010ae A |
3088 | |
3089 | for (count = 0; count < numpages; count++) { | |
6d2010ae | 3090 | /* respect totals, minlimit, maxlimit */ |
3e170ce0 | 3091 | if (m_total(super_class) <= m_minlimit(super_class) || |
6d2010ae A |
3092 | m_total(class) >= m_maxlimit(class)) |
3093 | break; | |
3094 | ||
3e170ce0 | 3095 | if ((o = slab_alloc(super_class, wait)) == NULL) |
6d2010ae A |
3096 | break; |
3097 | ||
2d21ac55 | 3098 | struct mbuf *m = (struct mbuf *)o; |
6d2010ae | 3099 | union mcluster *c = (union mcluster *)o; |
3e170ce0 | 3100 | union mbigcluster *mbc = (union mbigcluster *)o; |
2d21ac55 | 3101 | mcl_slab_t *sp = slab_get(o); |
6d2010ae | 3102 | mcache_audit_t *mca = NULL; |
2d21ac55 | 3103 | |
3e170ce0 A |
3104 | /* |
3105 | * Since one full page will be converted to MC_MBUF or
3106 | * MC_CL, verify that the reference count matches that
3107 | * assumption.
3108 | */ | |
39037602 | 3109 | VERIFY(sp->sl_refcnt == 1 && slab_is_detached(sp)); |
3e170ce0 | 3110 | VERIFY((sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED); |
6d2010ae A |
3111 | /* |
3112 | * Make sure that the cluster is unmolested | |
3113 | * while on the freelist.
3114 | */ | |
3115 | if (mclverify) { | |
3e170ce0 A |
3116 | mca = mcl_audit_buf2mca(super_class, |
3117 | (mcache_obj_t *)o); | |
3118 | mcache_audit_free_verify(mca, | |
3119 | (mcache_obj_t *)o, 0, m_maxsize(super_class)); | |
2d21ac55 A |
3120 | } |
3121 | ||
3e170ce0 | 3122 | /* Reinitialize it as an mbuf or 2K or 4K slab */ |
6d2010ae | 3123 | slab_init(sp, class, sp->sl_flags, |
3e170ce0 | 3124 | sp->sl_base, NULL, PAGE_SIZE, 0, numobj); |
2d21ac55 | 3125 | |
2d21ac55 A |
3126 | VERIFY(sp->sl_head == NULL); |
3127 | ||
3e170ce0 A |
3128 | VERIFY(m_total(super_class) >= 1); |
3129 | m_total(super_class)--; | |
3130 | ||
3131 | if (super_class == MC_BIGCL) | |
3132 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
2d21ac55 | 3133 | |
6d2010ae A |
3134 | m_total(class) += numobj; |
3135 | m_infree(class) += numobj; | |
3136 | ||
fe8ab488 A |
3137 | if (!mb_peak_newreport && mbuf_report_usage(class)) |
3138 | mb_peak_newreport = TRUE; | |
6d2010ae A |
3139 | |
3140 | i = numobj; | |
3141 | if (class == MC_MBUF) { | |
3142 | mbstat.m_mbufs = m_total(MC_MBUF); | |
3e170ce0 | 3143 | mtype_stat_add(MT_FREE, NMBPG); |
6d2010ae A |
3144 | while (i--) { |
3145 | /* | |
3146 | * If auditing is enabled, construct the | |
3147 | * shadow mbuf in the audit structure | |
3148 | * instead of the actual one. | |
3149 | * mbuf_slab_audit() will take care of | |
3150 | * restoring the contents after the | |
3151 | * integrity check. | |
3152 | */ | |
3153 | if (mclaudit != NULL) { | |
3154 | struct mbuf *ms; | |
3155 | mca = mcl_audit_buf2mca(MC_MBUF, | |
3156 | (mcache_obj_t *)m); | |
39236c6e | 3157 | ms = MCA_SAVED_MBUF_PTR(mca); |
6d2010ae A |
3158 | ms->m_type = MT_FREE; |
3159 | } else { | |
3160 | m->m_type = MT_FREE; | |
3161 | } | |
3162 | m->m_next = sp->sl_head; | |
3163 | sp->sl_head = (void *)m++; | |
3164 | } | |
3e170ce0 | 3165 | } else if (class == MC_CL) { /* MC_CL */ |
6d2010ae A |
3166 | mbstat.m_clfree = |
3167 | m_infree(MC_CL) + m_infree(MC_MBUF_CL); | |
3168 | mbstat.m_clusters = m_total(MC_CL); | |
3169 | while (i--) { | |
3170 | c->mcl_next = sp->sl_head; | |
3171 | sp->sl_head = (void *)c++; | |
2d21ac55 | 3172 | } |
3e170ce0 A |
3173 | } else { |
3174 | VERIFY(class == MC_BIGCL); | |
3175 | mbstat.m_bigclusters = m_total(MC_BIGCL); | |
3176 | mbstat.m_bigclfree = m_infree(MC_BIGCL) + | |
3177 | m_infree(MC_MBUF_BIGCL); | |
3178 | while (i--) { | |
3179 | mbc->mbc_next = sp->sl_head; | |
3180 | sp->sl_head = (void *)mbc++; | |
3181 | } | |
2d21ac55 A |
3182 | } |
3183 | ||
3e170ce0 | 3184 | /* Insert into the mbuf or 2k or 4k slab list */ |
6d2010ae | 3185 | slab_insert(sp, class); |
2d21ac55 A |
3186 | |
3187 | if ((i = mb_waiters) > 0) | |
3188 | mb_waiters = 0; | |
3189 | if (i != 0) | |
3190 | wakeup(mb_waitchan); | |
2d21ac55 | 3191 | } |
6d2010ae A |
3192 | return (count != 0); |
3193 | } | |
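The page count in freelist_populate() is a ceiling division: (num * m_size(class) + PAGE_SIZE - 1) / PAGE_SIZE rounds the byte total up to whole pages. A standalone sketch of the arithmetic (the 256-byte object size and 4KB page are assumed example values, not taken from this file):

    #include <assert.h>
    #include <stdio.h>

    #define EXAMPLE_PAGE_SIZE 4096

    /* Round a byte total up to whole pages, as freelist_populate() does. */
    static unsigned int
    pages_for(unsigned int nobj, unsigned int objsize)
    {
            return ((nobj * objsize + EXAMPLE_PAGE_SIZE - 1) /
                EXAMPLE_PAGE_SIZE);
    }

    int
    main(void)
    {
            /* 100 objects of 256 bytes = 25600 bytes -> 7 pages, not 6.25 */
            assert(pages_for(100, 256) == 7);
            printf("%u pages\n", pages_for(100, 256));
            return (0);
    }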
2d21ac55 | 3194 | |
6d2010ae A |
3195 | /* |
3196 | * For each class, initialize the freelist to hold m_minlimit() objects. | |
3197 | */ | |
3198 | static void | |
3199 | freelist_init(mbuf_class_t class) | |
3200 | { | |
3201 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
3202 | ||
3203 | VERIFY(class == MC_CL || class == MC_BIGCL); | |
3204 | VERIFY(m_total(class) == 0); | |
3205 | VERIFY(m_minlimit(class) > 0); | |
3206 | ||
3207 | while (m_total(class) < m_minlimit(class)) | |
3208 | (void) freelist_populate(class, m_minlimit(class), M_WAIT); | |
3209 | ||
3210 | VERIFY(m_total(class) >= m_minlimit(class)); | |
2d21ac55 A |
3211 | } |
3212 | ||
3213 | /* | |
3214 | * (Inaccurately) check if it might be worth a trip back to the | |
3215 | * mcache layer due to the availability of objects there. We'll | |
3216 | * end up back here if there's nothing up there. | |
3217 | */ | |
3218 | static boolean_t | |
3219 | mbuf_cached_above(mbuf_class_t class, int wait) | |
3220 | { | |
3221 | switch (class) { | |
3222 | case MC_MBUF: | |
3223 | if (wait & MCR_COMP) | |
3224 | return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL)) || | |
3225 | !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL))); | |
3226 | break; | |
3227 | ||
3228 | case MC_CL: | |
3229 | if (wait & MCR_COMP) | |
3230 | return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL))); | |
3231 | break; | |
3232 | ||
3233 | case MC_BIGCL: | |
3234 | if (wait & MCR_COMP) | |
3235 | return (!mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL))); | |
3236 | break; | |
3237 | ||
3238 | case MC_16KCL: | |
3239 | if (wait & MCR_COMP) | |
3240 | return (!mcache_bkt_isempty(m_cache(MC_MBUF_16KCL))); | |
3241 | break; | |
3242 | ||
3243 | case MC_MBUF_CL: | |
3244 | case MC_MBUF_BIGCL: | |
3245 | case MC_MBUF_16KCL: | |
3246 | break; | |
3247 | ||
3248 | default: | |
3249 | VERIFY(0); | |
3250 | /* NOTREACHED */ | |
3251 | } | |
3252 | ||
3253 | return (!mcache_bkt_isempty(m_cache(class))); | |
3254 | } | |
3255 | ||
3256 | /* | |
3257 | * If possible, convert constructed objects to raw ones. | |
3258 | */ | |
3259 | static boolean_t | |
3260 | mbuf_steal(mbuf_class_t class, unsigned int num) | |
3261 | { | |
3262 | mcache_obj_t *top = NULL; | |
3263 | mcache_obj_t **list = ⊤ | |
3264 | unsigned int tot = 0; | |
3265 | ||
3266 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
3267 | ||
3268 | switch (class) { | |
3269 | case MC_MBUF: | |
3270 | case MC_CL: | |
3271 | case MC_BIGCL: | |
3272 | case MC_16KCL: | |
3273 | return (FALSE); | |
3274 | ||
3275 | case MC_MBUF_CL: | |
3276 | case MC_MBUF_BIGCL: | |
3277 | case MC_MBUF_16KCL: | |
3278 | /* Get the required number of constructed objects if possible */ | |
3279 | if (m_infree(class) > m_minlimit(class)) { | |
3280 | tot = cslab_alloc(class, &list, | |
3281 | MIN(num, m_infree(class))); | |
3282 | } | |
3283 | ||
3284 | /* And destroy them to get back the raw objects */ | |
3285 | if (top != NULL) | |
3286 | (void) cslab_free(class, top, 1); | |
3287 | break; | |
3288 | ||
3289 | default: | |
3290 | VERIFY(0); | |
3291 | /* NOTREACHED */ | |
3292 | } | |
3293 | ||
3294 | return (tot == num); | |
3295 | } | |
3296 | ||
3297 | static void | |
3298 | m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp) | |
3299 | { | |
3300 | int m, bmap = 0; | |
3301 | ||
3302 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
3303 | ||
3304 | VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL)); | |
3305 | VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL)); | |
3306 | VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL)); | |
3307 | ||
3308 | /* | |
3309 | * This logic can be made smarter; for now, simply mark | |
3310 | * all other related classes as potential victims. | |
3311 | */ | |
3312 | switch (class) { | |
3313 | case MC_MBUF: | |
3314 | m_wantpurge(MC_CL)++; | |
6d2010ae | 3315 | m_wantpurge(MC_BIGCL)++; |
2d21ac55 A |
3316 | m_wantpurge(MC_MBUF_CL)++; |
3317 | m_wantpurge(MC_MBUF_BIGCL)++; | |
3318 | break; | |
3319 | ||
3320 | case MC_CL: | |
3321 | m_wantpurge(MC_MBUF)++; | |
6d2010ae A |
3322 | m_wantpurge(MC_BIGCL)++; |
3323 | m_wantpurge(MC_MBUF_BIGCL)++; | |
2d21ac55 A |
3324 | if (!comp) |
3325 | m_wantpurge(MC_MBUF_CL)++; | |
3326 | break; | |
3327 | ||
3328 | case MC_BIGCL: | |
6d2010ae A |
3329 | m_wantpurge(MC_MBUF)++; |
3330 | m_wantpurge(MC_CL)++; | |
3331 | m_wantpurge(MC_MBUF_CL)++; | |
2d21ac55 A |
3332 | if (!comp) |
3333 | m_wantpurge(MC_MBUF_BIGCL)++; | |
3334 | break; | |
3335 | ||
3336 | case MC_16KCL: | |
3337 | if (!comp) | |
3338 | m_wantpurge(MC_MBUF_16KCL)++; | |
3339 | break; | |
3340 | ||
3341 | default: | |
3342 | VERIFY(0); | |
3343 | /* NOTREACHED */ | |
3344 | } | |
3345 | ||
3346 | /* | |
3347 | * Run through each marked class and check if we really need to | |
3348 | * purge (and therefore temporarily disable) the per-CPU caches | |
3349 | * layer used by the class. If so, remember the classes since | |
3350 | * we are going to drop the lock below prior to purging. | |
3351 | */ | |
3352 | for (m = 0; m < NELEM(mbuf_table); m++) { | |
3353 | if (m_wantpurge(m) > 0) { | |
3354 | m_wantpurge(m) = 0; | |
3355 | /* | |
3356 | * Try hard to steal the required number of objects | |
3357 | * from the freelist of other mbuf classes. Only | |
3358 | * purge and disable the per-CPU caches layer when | |
3359 | * we don't have enough; it's the last resort. | |
3360 | */ | |
3361 | if (!mbuf_steal(m, num)) | |
3362 | bmap |= (1 << m); | |
3363 | } | |
3364 | } | |
3365 | ||
3366 | lck_mtx_unlock(mbuf_mlock); | |
3367 | ||
3368 | if (bmap != 0) { | |
39236c6e A |
3369 | /* signal the domains to drain */ |
3370 | net_drain_domains(); | |
2d21ac55 A |
3371 | |
3372 | /* Sigh; we have no other choices but to ask mcache to purge */ | |
3373 | for (m = 0; m < NELEM(mbuf_table); m++) { | |
3374 | if ((bmap & (1 << m)) && | |
fe8ab488 | 3375 | mcache_purge_cache(m_cache(m), TRUE)) { |
2d21ac55 A |
3376 | lck_mtx_lock(mbuf_mlock); |
3377 | m_purge_cnt(m)++; | |
3378 | mbstat.m_drain++; | |
3379 | lck_mtx_unlock(mbuf_mlock); | |
3380 | } | |
3381 | } | |
3382 | } else { | |
3383 | /* | |
3384 | * Request mcache to reap extra elements from all of its caches; | |
3385 | * note that all reaps are serialized and happen only at a fixed | |
3386 | * interval. | |
3387 | */ | |
3388 | mcache_reap(); | |
3389 | } | |
3390 | lck_mtx_lock(mbuf_mlock); | |
3391 | } | |
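m_reclaim() records its victim classes in a bitmap while holding mbuf_mlock and only acts on them after dropping the lock. A small standalone illustration of that mark-then-act pattern (the class count and per-class counts are made up):

    #include <stdio.h>

    int
    main(void)
    {
            int wantpurge[4] = { 0, 2, 0, 1 };      /* hypothetical counts */
            int bmap = 0, m;

            for (m = 0; m < 4; m++) {
                    if (wantpurge[m] > 0) {
                            wantpurge[m] = 0;
                            bmap |= (1 << m);       /* remember class m */
                    }
            }
            for (m = 0; m < 4; m++) {
                    if (bmap & (1 << m))
                            printf("purge caches of class %d\n", m);
            }
            return (0);
    }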
3392 | ||
3393 | static inline struct mbuf * | |
3394 | m_get_common(int wait, short type, int hdr) | |
3395 | { | |
3396 | struct mbuf *m; | |
3397 | int mcflags = MSLEEPF(wait); | |
3398 | ||
3399 | /* Is this due to a non-blocking retry? If so, then try harder */ | |
3400 | if (mcflags & MCR_NOSLEEP) | |
3401 | mcflags |= MCR_TRYHARD; | |
3402 | ||
3403 | m = mcache_alloc(m_cache(MC_MBUF), mcflags); | |
3404 | if (m != NULL) { | |
3405 | MBUF_INIT(m, hdr, type); | |
3406 | mtype_stat_inc(type); | |
3407 | mtype_stat_dec(MT_FREE); | |
3408 | #if CONFIG_MACF_NET | |
3409 | if (hdr && mac_init_mbuf(m, wait) != 0) { | |
3410 | m_free(m); | |
3411 | return (NULL); | |
3412 | } | |
3413 | #endif /* MAC_NET */ | |
3414 | } | |
3415 | return (m); | |
3416 | } | |
3417 | ||
3418 | /* | |
3419 | * Space allocation routines; these are also available as macros | |
3420 | * for critical paths. | |
3421 | */ | |
3422 | #define _M_GET(wait, type) m_get_common(wait, type, 0) | |
3423 | #define _M_GETHDR(wait, type) m_get_common(wait, type, 1) | |
3424 | #define _M_RETRY(wait, type) _M_GET(wait, type) | |
3425 | #define _M_RETRYHDR(wait, type) _M_GETHDR(wait, type) | |
3426 | #define _MGET(m, how, type) ((m) = _M_GET(how, type)) | |
3427 | #define _MGETHDR(m, how, type) ((m) = _M_GETHDR(how, type)) | |
3428 | ||
3429 | struct mbuf * | |
3430 | m_get(int wait, int type) | |
3431 | { | |
3432 | return (_M_GET(wait, type)); | |
3433 | } | |
3434 | ||
3435 | struct mbuf * | |
3436 | m_gethdr(int wait, int type) | |
3437 | { | |
3438 | return (_M_GETHDR(wait, type)); | |
3439 | } | |
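A hedged caller-side sketch for the allocators above (kernel context; the helper name is illustrative): grab a packet-header mbuf without blocking and release it with m_freem() on all paths.

    #include <sys/mbuf.h>

    static void
    example_gethdr(void)
    {
            struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);

            if (m == NULL)
                    return;         /* non-blocking attempt failed */
            m->m_len = 0;
            /* ... fill MTOD(m, caddr_t), set m_len and m_pkthdr.len ... */
            m_freem(m);             /* frees the entire chain */
    }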
3440 | ||
3441 | struct mbuf * | |
3442 | m_retry(int wait, int type) | |
3443 | { | |
3444 | return (_M_RETRY(wait, type)); | |
3445 | } | |
3446 | ||
3447 | struct mbuf * | |
3448 | m_retryhdr(int wait, int type) | |
3449 | { | |
3450 | return (_M_RETRYHDR(wait, type)); | |
3451 | } | |
3452 | ||
3453 | struct mbuf * | |
3454 | m_getclr(int wait, int type) | |
3455 | { | |
3456 | struct mbuf *m; | |
3457 | ||
3458 | _MGET(m, wait, type); | |
3459 | if (m != NULL) | |
3460 | bzero(MTOD(m, caddr_t), MLEN); | |
3461 | return (m); | |
3462 | } | |
3463 | ||
39037602 A |
3464 | static int |
3465 | m_free_paired(struct mbuf *m) | |
3466 | { | |
3467 | VERIFY((m->m_flags & M_EXT) && (MEXT_FLAGS(m) & EXTF_PAIRED)); | |
3468 | ||
3469 | membar_sync(); | |
3470 | if (MEXT_PMBUF(m) == m) { | |
3471 | volatile UInt16 *addr = (volatile UInt16 *)&MEXT_PREF(m); | |
3472 | int16_t oprefcnt, prefcnt; | |
3473 | ||
3474 | /* | |
3475 | * Paired ref count might be negative in case we lose | |
3476 | * against another thread clearing MEXT_PMBUF, in the | |
3477 | * event it occurs after the above memory barrier sync. | |
3478 | * In that case, just ignore it, as things have been unpaired. | |
3479 | */ | |
3480 | do { | |
3481 | oprefcnt = *addr; | |
3482 | prefcnt = oprefcnt - 1; | |
3483 | } while (!OSCompareAndSwap16(oprefcnt, prefcnt, addr)); | |
3484 | ||
3485 | if (prefcnt > 1) { | |
3486 | return (1); | |
3487 | } else if (prefcnt == 1) { | |
3488 | (*(m->m_ext.ext_free))(m->m_ext.ext_buf, | |
3489 | m->m_ext.ext_size, m->m_ext.ext_arg); | |
3490 | return (1); | |
3491 | } else if (prefcnt == 0) { | |
3492 | VERIFY(MBUF_IS_PAIRED(m)); | |
3493 | ||
3494 | /* | |
3495 | * Restore minref to its natural value, so that | |
3496 | * the caller will be able to free the cluster | |
3497 | * as appropriate. | |
3498 | */ | |
3499 | MEXT_MINREF(m) = 0; | |
3500 | ||
3501 | /* | |
3502 | * Clear MEXT_PMBUF, but leave EXTF_PAIRED intact | |
3503 | * as it is immutable. atomic_set_ptr also causes | |
3504 | * memory barrier sync. | |
3505 | */ | |
3506 | atomic_set_ptr(&MEXT_PMBUF(m), NULL); | |
3507 | ||
3508 | switch (m->m_ext.ext_size) { | |
3509 | case MCLBYTES: | |
3510 | m->m_ext.ext_free = NULL; | |
3511 | break; | |
3512 | ||
3513 | case MBIGCLBYTES: | |
3514 | m->m_ext.ext_free = m_bigfree; | |
3515 | break; | |
3516 | ||
3517 | case M16KCLBYTES: | |
3518 | m->m_ext.ext_free = m_16kfree; | |
3519 | break; | |
3520 | ||
3521 | default: | |
3522 | VERIFY(0); | |
3523 | /* NOTREACHED */ | |
3524 | } | |
3525 | } | |
3526 | } | |
3527 | ||
3528 | /* | |
3529 | * Tell caller the unpair has occurred, and that the reference | |
3530 | * count on the external cluster held for the paired mbuf should | |
3531 | * now be dropped. | |
3532 | */ | |
3533 | return (0); | |
3534 | } | |
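The reference drop above is a classic compare-and-swap retry loop. A user-space analog using C11 atomics in place of OSCompareAndSwap16() (a sketch of the pattern, not the kernel primitive):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Retry until we atomically publish oprefcnt - 1, as the loop in
     * m_free_paired() does. */
    static int16_t
    paired_decref(_Atomic int16_t *addr)
    {
            int16_t oprefcnt, prefcnt;

            do {
                    oprefcnt = atomic_load(addr);
                    prefcnt = oprefcnt - 1;
            } while (!atomic_compare_exchange_weak(addr, &oprefcnt,
                prefcnt));

            return (prefcnt);
    }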
3535 | ||
2d21ac55 A |
3536 | struct mbuf * |
3537 | m_free(struct mbuf *m) | |
3538 | { | |
3539 | struct mbuf *n = m->m_next; | |
3540 | ||
3541 | if (m->m_type == MT_FREE) | |
3542 | panic("m_free: freeing an already freed mbuf"); | |
3543 | ||
2d21ac55 | 3544 | if (m->m_flags & M_PKTHDR) { |
39236c6e A |
3545 | /* Check for scratch area overflow */ |
3546 | m_redzone_verify(m); | |
3547 | /* Free the aux data and tags if there is any */ | |
2d21ac55 | 3548 | m_tag_delete_chain(m, NULL); |
39037602 A |
3549 | |
3550 | m_do_tx_compl_callback(m, NULL); | |
2d21ac55 A |
3551 | } |
3552 | ||
3553 | if (m->m_flags & M_EXT) { | |
39037602 | 3554 | u_int16_t refcnt; |
6d2010ae | 3555 | u_int32_t composite; |
2d21ac55 | 3556 | |
39037602 A |
3557 | if (MBUF_IS_PAIRED(m) && m_free_paired(m)) |
3558 | return (n); | |
3559 | ||
2d21ac55 | 3560 | refcnt = m_decref(m); |
6d2010ae | 3561 | composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE); |
39037602 A |
3562 | |
3563 | if (refcnt == MEXT_MINREF(m) && !composite) { | |
2d21ac55 A |
3564 | if (m->m_ext.ext_free == NULL) { |
3565 | mcache_free(m_cache(MC_CL), m->m_ext.ext_buf); | |
3566 | } else if (m->m_ext.ext_free == m_bigfree) { | |
3567 | mcache_free(m_cache(MC_BIGCL), | |
3568 | m->m_ext.ext_buf); | |
3569 | } else if (m->m_ext.ext_free == m_16kfree) { | |
3570 | mcache_free(m_cache(MC_16KCL), | |
3571 | m->m_ext.ext_buf); | |
3572 | } else { | |
3573 | (*(m->m_ext.ext_free))(m->m_ext.ext_buf, | |
3574 | m->m_ext.ext_size, m->m_ext.ext_arg); | |
3575 | } | |
3576 | mcache_free(ref_cache, MEXT_RFA(m)); | |
3577 | MEXT_RFA(m) = NULL; | |
39037602 A |
3578 | } else if (refcnt == MEXT_MINREF(m) && composite) { |
3579 | VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED)); | |
2d21ac55 A |
3580 | VERIFY(m->m_type != MT_FREE); |
3581 | ||
3582 | mtype_stat_dec(m->m_type); | |
3583 | mtype_stat_inc(MT_FREE); | |
3584 | ||
3585 | m->m_type = MT_FREE; | |
3586 | m->m_flags = M_EXT; | |
3587 | m->m_len = 0; | |
3588 | m->m_next = m->m_nextpkt = NULL; | |
3589 | ||
6d2010ae A |
3590 | MEXT_FLAGS(m) &= ~EXTF_READONLY; |
3591 | ||
2d21ac55 A |
3592 | /* "Free" into the intermediate cache */ |
3593 | if (m->m_ext.ext_free == NULL) { | |
3594 | mcache_free(m_cache(MC_MBUF_CL), m); | |
3595 | } else if (m->m_ext.ext_free == m_bigfree) { | |
3596 | mcache_free(m_cache(MC_MBUF_BIGCL), m); | |
3597 | } else { | |
3598 | VERIFY(m->m_ext.ext_free == m_16kfree); | |
3599 | mcache_free(m_cache(MC_MBUF_16KCL), m); | |
3600 | } | |
3601 | return (n); | |
3602 | } | |
3603 | } | |
3604 | ||
3605 | if (m->m_type != MT_FREE) { | |
3606 | mtype_stat_dec(m->m_type); | |
3607 | mtype_stat_inc(MT_FREE); | |
3608 | } | |
3609 | ||
3610 | m->m_type = MT_FREE; | |
3611 | m->m_flags = m->m_len = 0; | |
3612 | m->m_next = m->m_nextpkt = NULL; | |
3613 | ||
3614 | mcache_free(m_cache(MC_MBUF), m); | |
3615 | ||
3616 | return (n); | |
3617 | } | |
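Because m_free() captures m->m_next before tearing the mbuf down and returns it, a whole chain can be released with a simple loop; this is essentially what m_freem() does. A minimal sketch:

    static void
    free_chain(struct mbuf *m)
    {
            while (m != NULL)
                    m = m_free(m);  /* returns the successor of the freed mbuf */
    }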
3618 | ||
3619 | __private_extern__ struct mbuf * | |
3620 | m_clattach(struct mbuf *m, int type, caddr_t extbuf, | |
3621 | void (*extfree)(caddr_t, u_int, caddr_t), u_int extsize, caddr_t extarg, | |
39037602 | 3622 | int wait, int pair) |
2d21ac55 A |
3623 | { |
3624 | struct ext_ref *rfa = NULL; | |
3625 | ||
39037602 A |
3626 | /* |
3627 | * If pairing is requested and an existing mbuf is provided, reject | |
3628 | * it if it's already been paired to another cluster. Otherwise, | |
3629 | * allocate a new one or free any existing below. | |
3630 | */ | |
3631 | if ((m != NULL && MBUF_IS_PAIRED(m)) || | |
3632 | (m == NULL && (m = _M_GETHDR(wait, type)) == NULL)) | |
2d21ac55 A |
3633 | return (NULL); |
3634 | ||
3635 | if (m->m_flags & M_EXT) { | |
39037602 | 3636 | u_int16_t refcnt; |
6d2010ae | 3637 | u_int32_t composite; |
2d21ac55 A |
3638 | |
3639 | refcnt = m_decref(m); | |
6d2010ae | 3640 | composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE); |
39037602 A |
3641 | VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED) && MEXT_PMBUF(m) == NULL); |
3642 | if (refcnt == MEXT_MINREF(m) && !composite) { | |
2d21ac55 A |
3643 | if (m->m_ext.ext_free == NULL) { |
3644 | mcache_free(m_cache(MC_CL), m->m_ext.ext_buf); | |
3645 | } else if (m->m_ext.ext_free == m_bigfree) { | |
3646 | mcache_free(m_cache(MC_BIGCL), | |
3647 | m->m_ext.ext_buf); | |
3648 | } else if (m->m_ext.ext_free == m_16kfree) { | |
3649 | mcache_free(m_cache(MC_16KCL), | |
3650 | m->m_ext.ext_buf); | |
3651 | } else { | |
3652 | (*(m->m_ext.ext_free))(m->m_ext.ext_buf, | |
3653 | m->m_ext.ext_size, m->m_ext.ext_arg); | |
3654 | } | |
3655 | /* Re-use the reference structure */ | |
3656 | rfa = MEXT_RFA(m); | |
39037602 | 3657 | } else if (refcnt == MEXT_MINREF(m) && composite) { |
2d21ac55 A |
3658 | VERIFY(m->m_type != MT_FREE); |
3659 | ||
3660 | mtype_stat_dec(m->m_type); | |
3661 | mtype_stat_inc(MT_FREE); | |
3662 | ||
3663 | m->m_type = MT_FREE; | |
3664 | m->m_flags = M_EXT; | |
3665 | m->m_len = 0; | |
3666 | m->m_next = m->m_nextpkt = NULL; | |
6d2010ae A |
3667 | |
3668 | MEXT_FLAGS(m) &= ~EXTF_READONLY; | |
3669 | ||
2d21ac55 A |
3670 | /* "Free" into the intermediate cache */ |
3671 | if (m->m_ext.ext_free == NULL) { | |
3672 | mcache_free(m_cache(MC_MBUF_CL), m); | |
3673 | } else if (m->m_ext.ext_free == m_bigfree) { | |
3674 | mcache_free(m_cache(MC_MBUF_BIGCL), m); | |
3675 | } else { | |
3676 | VERIFY(m->m_ext.ext_free == m_16kfree); | |
3677 | mcache_free(m_cache(MC_MBUF_16KCL), m); | |
3678 | } | |
3679 | /* | |
3680 | * Allocate a new mbuf, since we didn't divorce | |
3681 | * the composite mbuf + cluster pair above. | |
3682 | */ | |
3683 | if ((m = _M_GETHDR(wait, type)) == NULL) | |
3684 | return (NULL); | |
3685 | } | |
3686 | } | |
3687 | ||
3688 | if (rfa == NULL && | |
3689 | (rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) { | |
3690 | m_free(m); | |
3691 | return (NULL); | |
3692 | } | |
3693 | ||
39037602 A |
3694 | if (!pair) { |
3695 | MEXT_INIT(m, extbuf, extsize, extfree, extarg, rfa, | |
3696 | 0, 1, 0, 0, 0, NULL); | |
3697 | } else { | |
3698 | MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa, | |
3699 | 1, 1, 1, EXTF_PAIRED, 0, m); | |
3700 | } | |
2d21ac55 A |
3701 | |
3702 | return (m); | |
3703 | } | |
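A hedged sketch of using m_clattach() to wrap a driver-owned buffer without copying; my_extfree, buf, size and arg are illustrative names, and pair is 0 for the ordinary (unpaired) case:

    #include <sys/mbuf.h>

    /* Hypothetical routine invoked when the last reference drops. */
    static void
    my_extfree(caddr_t buf, u_int size, caddr_t arg)
    {
            /* ... return buf to the driver's buffer pool ... */
    }

    static struct mbuf *
    wrap_buffer(caddr_t buf, u_int size, caddr_t arg, int wait)
    {
            /* A NULL mbuf makes m_clattach() allocate the header itself. */
            return (m_clattach(NULL, MT_DATA, buf, my_extfree, size, arg,
                wait, 0));
    }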
3704 | ||
b0d623f7 A |
3705 | /* |
3706 | * Perform `fast' allocation of mbuf clusters from a cache of recently-freed | |
3707 | * clusters. (If the cache is empty, new clusters are allocated en masse.) | |
3708 | */ | |
3709 | struct mbuf * | |
3710 | m_getcl(int wait, int type, int flags) | |
3711 | { | |
3712 | struct mbuf *m; | |
3713 | int mcflags = MSLEEPF(wait); | |
3714 | int hdr = (flags & M_PKTHDR); | |
3715 | ||
3716 | /* Is this due to a non-blocking retry? If so, then try harder */ | |
3717 | if (mcflags & MCR_NOSLEEP) | |
3718 | mcflags |= MCR_TRYHARD; | |
3719 | ||
6d2010ae A |
3720 | m = mcache_alloc(m_cache(MC_MBUF_CL), mcflags); |
3721 | if (m != NULL) { | |
39037602 | 3722 | u_int16_t flag; |
6d2010ae A |
3723 | struct ext_ref *rfa; |
3724 | void *cl; | |
3725 | ||
3726 | VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT); | |
3727 | cl = m->m_ext.ext_buf; | |
3728 | rfa = MEXT_RFA(m); | |
3729 | ||
3730 | ASSERT(cl != NULL && rfa != NULL); | |
3731 | VERIFY(MBUF_IS_COMPOSITE(m) && m->m_ext.ext_free == NULL); | |
3732 | ||
3733 | flag = MEXT_FLAGS(m); | |
3734 | ||
b0d623f7 | 3735 | MBUF_INIT(m, hdr, type); |
6d2010ae A |
3736 | MBUF_CL_INIT(m, cl, rfa, 1, flag); |
3737 | ||
b0d623f7 A |
3738 | mtype_stat_inc(type); |
3739 | mtype_stat_dec(MT_FREE); | |
3740 | #if CONFIG_MACF_NET | |
3741 | if (hdr && mac_init_mbuf(m, wait) != 0) { | |
6d2010ae | 3742 | m_freem(m); |
b0d623f7 A |
3743 | return (NULL); |
3744 | } | |
3745 | #endif /* MAC_NET */ | |
3746 | } | |
3747 | return (m); | |
3748 | } | |
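Caller-side sketch: a single m_getcl() call is the fast path for "header mbuf plus 2KB cluster", avoiding a separate m_gethdr()/m_mclget() pair (the helper name is illustrative):

    static struct mbuf *
    example_getcl(void)
    {
            struct mbuf *m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);

            if (m != NULL) {
                    /* M_EXT is set; m->m_ext.ext_buf is the 2KB cluster */
                    VERIFY(m->m_flags & M_EXT);
            }
            return (m);
    }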
3749 | ||
2d21ac55 A |
3750 | /* m_mclget() adds an mbuf cluster to a normal mbuf */
3751 | struct mbuf * | |
3752 | m_mclget(struct mbuf *m, int wait) | |
3753 | { | |
3754 | struct ext_ref *rfa; | |
3755 | ||
3756 | if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) | |
3757 | return (m); | |
3758 | ||
3759 | m->m_ext.ext_buf = m_mclalloc(wait); | |
3760 | if (m->m_ext.ext_buf != NULL) { | |
3761 | MBUF_CL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0); | |
3762 | } else { | |
3763 | mcache_free(ref_cache, rfa); | |
3764 | } | |
3765 | return (m); | |
3766 | } | |
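Note that m_mclget() returns the mbuf whether or not the cluster allocation succeeded, so the caller must test M_EXT afterwards. A sketch of the customary pattern (the helper name is illustrative):

    static struct mbuf *
    get_mbuf_with_cluster(int wait)
    {
            struct mbuf *m = m_get(wait, MT_DATA);

            if (m == NULL)
                    return (NULL);
            m = m_mclget(m, wait);
            if (!(m->m_flags & M_EXT)) {    /* cluster attach failed */
                    m_free(m);
                    return (NULL);
            }
            return (m);
    }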
3767 | ||
3768 | /* Allocate an mbuf cluster */ | |
3769 | caddr_t | |
3770 | m_mclalloc(int wait) | |
3771 | { | |
3772 | int mcflags = MSLEEPF(wait); | |
3773 | ||
3774 | /* Is this due to a non-blocking retry? If so, then try harder */ | |
3775 | if (mcflags & MCR_NOSLEEP) | |
3776 | mcflags |= MCR_TRYHARD; | |
3777 | ||
3778 | return (mcache_alloc(m_cache(MC_CL), mcflags)); | |
3779 | } | |
3780 | ||
3781 | /* Free an mbuf cluster */ | |
3782 | void | |
3783 | m_mclfree(caddr_t p) | |
3784 | { | |
3785 | mcache_free(m_cache(MC_CL), p); | |
3786 | } | |
3787 | ||
3788 | /* | |
3789 | * mcl_hasreference() checks if a cluster of an mbuf is referenced by | |
6d2010ae | 3790 | * another mbuf; see comments in m_incref() regarding EXTF_READONLY. |
2d21ac55 A |
3791 | */ |
3792 | int | |
3793 | m_mclhasreference(struct mbuf *m) | |
3794 | { | |
3795 | if (!(m->m_flags & M_EXT)) | |
3796 | return (0); | |
9bccf70c | 3797 | |
2d21ac55 A |
3798 | ASSERT(MEXT_RFA(m) != NULL); |
3799 | ||
6d2010ae | 3800 | return ((MEXT_FLAGS(m) & EXTF_READONLY) ? 1 : 0); |
9bccf70c A |
3801 | } |
3802 | ||
2d21ac55 A |
3803 | __private_extern__ caddr_t |
3804 | m_bigalloc(int wait) | |
9bccf70c | 3805 | { |
2d21ac55 | 3806 | int mcflags = MSLEEPF(wait); |
91447636 | 3807 | |
2d21ac55 A |
3808 | /* Is this due to a non-blocking retry? If so, then try harder */ |
3809 | if (mcflags & MCR_NOSLEEP) | |
3810 | mcflags |= MCR_TRYHARD; | |
91447636 | 3811 | |
2d21ac55 | 3812 | return (mcache_alloc(m_cache(MC_BIGCL), mcflags)); |
9bccf70c A |
3813 | } |
3814 | ||
2d21ac55 A |
3815 | __private_extern__ void |
3816 | m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg) | |
9bccf70c | 3817 | { |
2d21ac55 | 3818 | mcache_free(m_cache(MC_BIGCL), p); |
9bccf70c A |
3819 | } |
3820 | ||
2d21ac55 A |
3821 | /* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
3822 | __private_extern__ struct mbuf * | |
3823 | m_mbigget(struct mbuf *m, int wait) | |
3824 | { | |
3825 | struct ext_ref *rfa; | |
3826 | ||
3827 | if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) | |
3828 | return (m); | |
3829 | ||
3830 | m->m_ext.ext_buf = m_bigalloc(wait); | |
3831 | if (m->m_ext.ext_buf != NULL) { | |
3832 | MBUF_BIGCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0); | |
91447636 | 3833 | } else { |
2d21ac55 | 3834 | mcache_free(ref_cache, rfa); |
91447636 | 3835 | } |
2d21ac55 A |
3836 | return (m); |
3837 | } | |
3838 | ||
3839 | __private_extern__ caddr_t | |
3840 | m_16kalloc(int wait) | |
3841 | { | |
3842 | int mcflags = MSLEEPF(wait); | |
3843 | ||
3844 | /* Is this due to a non-blocking retry? If so, then try harder */ | |
3845 | if (mcflags & MCR_NOSLEEP) | |
3846 | mcflags |= MCR_TRYHARD; | |
3847 | ||
3848 | return (mcache_alloc(m_cache(MC_16KCL), mcflags)); | |
91447636 A |
3849 | } |
3850 | ||
3851 | __private_extern__ void | |
2d21ac55 | 3852 | m_16kfree(caddr_t p, __unused u_int size, __unused caddr_t arg) |
91447636 | 3853 | { |
2d21ac55 | 3854 | mcache_free(m_cache(MC_16KCL), p); |
91447636 A |
3855 | } |
3856 | ||
2d21ac55 | 3857 | /* m_m16kget() adds a 16KB mbuf cluster to a normal mbuf */
91447636 | 3858 | __private_extern__ struct mbuf * |
2d21ac55 | 3859 | m_m16kget(struct mbuf *m, int wait) |
91447636 | 3860 | { |
2d21ac55 A |
3861 | struct ext_ref *rfa; |
3862 | ||
3863 | if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) | |
3864 | return (m); | |
3865 | ||
3866 | m->m_ext.ext_buf = m_16kalloc(wait); | |
3867 | if (m->m_ext.ext_buf != NULL) { | |
3868 | MBUF_16KCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0); | |
3869 | } else { | |
3870 | mcache_free(ref_cache, rfa); | |
91447636 | 3871 | } |
2d21ac55 | 3872 | return (m); |
91447636 A |
3873 | } |
3874 | ||
b0d623f7 A |
3875 | /* |
3876 | * "Move" mbuf pkthdr from "from" to "to". | |
3877 | * "from" must have M_PKTHDR set, and "to" must be empty. | |
3878 | */ | |
9bccf70c | 3879 | void |
2d21ac55 | 3880 | m_copy_pkthdr(struct mbuf *to, struct mbuf *from) |
9bccf70c | 3881 | { |
39236c6e A |
3882 | VERIFY(from->m_flags & M_PKTHDR); |
3883 | ||
3884 | /* Check for scratch area overflow */ | |
3885 | m_redzone_verify(from); | |
3886 | ||
3887 | if (to->m_flags & M_PKTHDR) { | |
3888 | /* Check for scratch area overflow */ | |
3889 | m_redzone_verify(to); | |
3890 | /* We will be taking over the tags of 'to' */ | |
2d21ac55 | 3891 | m_tag_delete_chain(to, NULL); |
39236c6e | 3892 | } |
2d21ac55 | 3893 | to->m_pkthdr = from->m_pkthdr; /* especially tags */ |
39236c6e A |
3894 | m_classifier_init(from, 0); /* purge classifier info */ |
3895 | m_tag_init(from, 1); /* purge all tags from src */ | |
3896 | m_scratch_init(from); /* clear src scratch area */ | |
935ed37a A |
3897 | to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT); |
3898 | if ((to->m_flags & M_EXT) == 0) | |
3899 | to->m_data = to->m_pktdat; | |
39236c6e | 3900 | m_redzone_init(to); /* setup red zone on dst */ |
9bccf70c A |
3901 | } |
3902 | ||
91447636 A |
3903 | /* |
3904 | * Duplicate "from"'s mbuf pkthdr in "to". | |
3905 | * "from" must have M_PKTHDR set, and "to" must be empty. | |
3906 | * In particular, this does a deep copy of the packet tags. | |
3907 | */ | |
3a60a9f5 | 3908 | static int |
91447636 A |
3909 | m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how) |
3910 | { | |
39236c6e A |
3911 | VERIFY(from->m_flags & M_PKTHDR); |
3912 | ||
3913 | /* Check for scratch area overflow */ | |
3914 | m_redzone_verify(from); | |
3915 | ||
3916 | if (to->m_flags & M_PKTHDR) { | |
3917 | /* Check for scratch area overflow */ | |
3918 | m_redzone_verify(to); | |
3919 | /* We will be taking over the tags of 'to' */ | |
2d21ac55 | 3920 | m_tag_delete_chain(to, NULL); |
39236c6e | 3921 | } |
2d21ac55 A |
3922 | to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT); |
3923 | if ((to->m_flags & M_EXT) == 0) | |
3924 | to->m_data = to->m_pktdat; | |
3925 | to->m_pkthdr = from->m_pkthdr; | |
39236c6e A |
3926 | m_redzone_init(to); /* setup red zone on dst */ |
3927 | m_tag_init(to, 0); /* preserve dst static tags */ | |
2d21ac55 | 3928 | return (m_tag_copy_chain(to, from, how)); |
91447636 | 3929 | } |
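The two routines above differ in ownership: m_copy_pkthdr() moves the header and purges tags, classifier state, and the scratch area from the source, while m_dup_pkthdr() deep-copies the tag chain so both mbufs stay usable. A hedged fragment (the consuming_from flag is illustrative):

    if (consuming_from) {
            /* 'from' is being given up; its tags move with the header */
            m_copy_pkthdr(to, from);
    } else {
            /* both mbufs survive; the tag chain must be deep-copied */
            (void) m_dup_pkthdr(to, from, how);
    }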
fa4905b1 | 3930 | |
316670eb A |
3931 | void |
3932 | m_copy_pftag(struct mbuf *to, struct mbuf *from) | |
3933 | { | |
39037602 | 3934 | memcpy(m_pftag(to), m_pftag(from), sizeof(struct pf_mtag)); |
39236c6e | 3935 | #if PF_ECN |
39037602 A |
3936 | m_pftag(to)->pftag_hdr = NULL; |
3937 | m_pftag(to)->pftag_flags &= ~(PF_TAG_HDR_INET|PF_TAG_HDR_INET6); | |
39236c6e A |
3938 | #endif /* PF_ECN */ |
3939 | } | |
3940 | ||
3941 | void | |
3942 | m_classifier_init(struct mbuf *m, uint32_t pktf_mask) | |
3943 | { | |
3944 | VERIFY(m->m_flags & M_PKTHDR); | |
3945 | ||
3946 | m->m_pkthdr.pkt_proto = 0; | |
3947 | m->m_pkthdr.pkt_flowsrc = 0; | |
3948 | m->m_pkthdr.pkt_flowid = 0; | |
3949 | m->m_pkthdr.pkt_flags &= pktf_mask; /* caller-defined mask */ | |
3950 | /* preserve service class and interface info for loopback packets */ | |
3951 | if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP)) | |
3952 | (void) m_set_service_class(m, MBUF_SC_BE); | |
3953 | if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) | |
3954 | m->m_pkthdr.pkt_ifainfo = 0; | |
3955 | #if MEASURE_BW | |
3956 | m->m_pkthdr.pkt_bwseq = 0; | |
3957 | #endif /* MEASURE_BW */ | |
39037602 | 3958 | m->m_pkthdr.pkt_timestamp = 0; |
39236c6e A |
3959 | } |
3960 | ||
3961 | void | |
3962 | m_copy_classifier(struct mbuf *to, struct mbuf *from) | |
3963 | { | |
3964 | VERIFY(to->m_flags & M_PKTHDR); | |
3965 | VERIFY(from->m_flags & M_PKTHDR); | |
3966 | ||
3967 | to->m_pkthdr.pkt_proto = from->m_pkthdr.pkt_proto; | |
3968 | to->m_pkthdr.pkt_flowsrc = from->m_pkthdr.pkt_flowsrc; | |
3969 | to->m_pkthdr.pkt_flowid = from->m_pkthdr.pkt_flowid; | |
3970 | to->m_pkthdr.pkt_flags = from->m_pkthdr.pkt_flags; | |
3971 | (void) m_set_service_class(to, from->m_pkthdr.pkt_svc); | |
3972 | to->m_pkthdr.pkt_ifainfo = from->m_pkthdr.pkt_ifainfo; | |
39236c6e A |
3973 | #if MEASURE_BW |
3974 | to->m_pkthdr.pkt_bwseq = from->m_pkthdr.pkt_bwseq; | |
3975 | #endif /* MEASURE_BW */ | |
316670eb A |
3976 | } |
3977 | ||
9bccf70c | 3978 | /* |
2d21ac55 A |
3979 | * Return a list of mbuf hdrs that point to clusters. Try for num_needed; |
3980 | * if wantall is not set, return however many were available. Set up the | |
3981 | * first num_with_pkthdrs with mbuf hdrs configured as packet headers; these | |
3982 | * are chained on the m_nextpkt field. Any packets requested beyond this | |
3983 | * are chained onto the last packet header's m_next field. The size of | |
3984 | * the cluster is controlled by the parameter bufsize. | |
9bccf70c | 3985 | */ |
91447636 | 3986 | __private_extern__ struct mbuf * |
2d21ac55 A |
3987 | m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, |
3988 | int wait, int wantall, size_t bufsize) | |
fa4905b1 A |
3989 | { |
3990 | struct mbuf *m; | |
3991 | struct mbuf **np, *top; | |
2d21ac55 A |
3992 | unsigned int pnum, needed = *num_needed; |
3993 | mcache_obj_t *mp_list = NULL; | |
3994 | int mcflags = MSLEEPF(wait); | |
39037602 | 3995 | u_int16_t flag; |
2d21ac55 A |
3996 | struct ext_ref *rfa; |
3997 | mcache_t *cp; | |
3998 | void *cl; | |
3999 | ||
4000 | ASSERT(bufsize == m_maxsize(MC_CL) || | |
4001 | bufsize == m_maxsize(MC_BIGCL) || | |
4002 | bufsize == m_maxsize(MC_16KCL)); | |
4003 | ||
4004 | /* | |
4005 | * Caller must first check for njcl because this | |
4006 | * routine is internal and not exposed/used via KPI. | |
4007 | */ | |
4008 | VERIFY(bufsize != m_maxsize(MC_16KCL) || njcl > 0); | |
4009 | ||
fa4905b1 A |
4010 | top = NULL; |
4011 | np = ⊤ | |
2d21ac55 | 4012 | pnum = 0; |
fa4905b1 | 4013 | |
2d21ac55 A |
4014 | /* |
4015 | * The caller doesn't want all the requested buffers; only some. | |
4016 | * Try hard to get what we can, but don't block. This effectively | |
4017 | * overrides MCR_SLEEP, since this thread will not go to sleep | |
4018 | * if we can't get all the buffers. | |
4019 | */ | |
4020 | if (!wantall || (mcflags & MCR_NOSLEEP)) | |
4021 | mcflags |= MCR_TRYHARD; | |
4022 | ||
4023 | /* Allocate the composite mbuf + cluster elements from the cache */ | |
4024 | if (bufsize == m_maxsize(MC_CL)) | |
4025 | cp = m_cache(MC_MBUF_CL); | |
4026 | else if (bufsize == m_maxsize(MC_BIGCL)) | |
4027 | cp = m_cache(MC_MBUF_BIGCL); | |
4028 | else | |
4029 | cp = m_cache(MC_MBUF_16KCL); | |
4030 | needed = mcache_alloc_ext(cp, &mp_list, needed, mcflags); | |
4031 | ||
4032 | for (pnum = 0; pnum < needed; pnum++) { | |
4033 | m = (struct mbuf *)mp_list; | |
4034 | mp_list = mp_list->obj_next; | |
4035 | ||
4036 | VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT); | |
4037 | cl = m->m_ext.ext_buf; | |
4038 | rfa = MEXT_RFA(m); | |
4039 | ||
4040 | ASSERT(cl != NULL && rfa != NULL); | |
4041 | VERIFY(MBUF_IS_COMPOSITE(m)); | |
4042 | ||
4043 | flag = MEXT_FLAGS(m); | |
4044 | ||
4045 | MBUF_INIT(m, num_with_pkthdrs, MT_DATA); | |
4046 | if (bufsize == m_maxsize(MC_16KCL)) { | |
4047 | MBUF_16KCL_INIT(m, cl, rfa, 1, flag); | |
4048 | } else if (bufsize == m_maxsize(MC_BIGCL)) { | |
4049 | MBUF_BIGCL_INIT(m, cl, rfa, 1, flag); | |
91447636 | 4050 | } else { |
2d21ac55 A |
4051 | MBUF_CL_INIT(m, cl, rfa, 1, flag); |
4052 | } | |
4053 | ||
4054 | if (num_with_pkthdrs > 0) { | |
4055 | --num_with_pkthdrs; | |
4056 | #if CONFIG_MACF_NET | |
4057 | if (mac_mbuf_label_init(m, wait) != 0) { | |
6d2010ae | 4058 | m_freem(m); |
2d21ac55 | 4059 | break; |
91447636 | 4060 | } |
2d21ac55 | 4061 | #endif /* MAC_NET */ |
91447636 | 4062 | } |
2d21ac55 A |
4063 | |
4064 | *np = m; | |
4065 | if (num_with_pkthdrs > 0) | |
91447636 A |
4066 | np = &m->m_nextpkt; |
4067 | else | |
4068 | np = &m->m_next; | |
4069 | } | |
2d21ac55 A |
4070 | ASSERT(pnum != *num_needed || mp_list == NULL); |
4071 | if (mp_list != NULL) | |
4072 | mcache_free_ext(cp, mp_list); | |
4073 | ||
4074 | if (pnum > 0) { | |
4075 | mtype_stat_add(MT_DATA, pnum); | |
4076 | mtype_stat_sub(MT_FREE, pnum); | |
4077 | } | |
4078 | ||
4079 | if (wantall && (pnum != *num_needed)) { | |
4080 | if (top != NULL) | |
4081 | m_freem_list(top); | |
4082 | return (NULL); | |
91447636 | 4083 | } |
fa4905b1 | 4084 | |
316670eb A |
4085 | if (pnum > *num_needed) { |
4086 | printf("%s: File a radar related to <rdar://10146739>. \ | |
4087 | needed = %u, pnum = %u, num_needed = %u \n", | |
4088 | __func__, needed, pnum, *num_needed); | |
39037602 | 4089 | } |
316670eb | 4090 | |
2d21ac55 A |
4091 | *num_needed = pnum; |
4092 | return (top); | |
4093 | } | |
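A hedged caller-side fragment for the routine above (internal to this file): request up to eight 2KB-cluster packets without blocking; on return, num_needed holds how many were actually built, chained through m_nextpkt.

    unsigned int num_needed = 8;
    struct mbuf *top;

    top = m_getpackets_internal(&num_needed, 8, M_DONTWAIT,
        0 /* !wantall */, m_maxsize(MC_CL));
    /* top may be shorter than requested; num_needed says how many */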
fa4905b1 | 4094 | |
91447636 | 4095 | /* |
2d21ac55 A |
4096 | * Return a list of mbufs linked by m_nextpkt. Try for numlist, and if | |
4097 | * wantall is not set, return however many were available. The size of | |
4098 | * each mbuf in the list is controlled by the parameter packetlen. Each | |
4099 | * mbuf of the list may have a chain of mbufs linked by m_next; each mbuf | |
4100 | * in the chain is called a segment. If maxsegments is not NULL and the | |
4101 | * value pointed to is not zero, it specifies the maximum number of segments | |
4102 | * for a chain of mbufs. If maxsegments is NULL or the value pointed to | |
4103 | * is zero, the caller has no restriction on the number of segments. | |
4104 | * The actual number of segments of an mbuf chain is returned in the value | |
4105 | * pointed to by maxsegments. | |
91447636 | 4106 | */ |
91447636 | 4107 | __private_extern__ struct mbuf * |
2d21ac55 A |
4108 | m_allocpacket_internal(unsigned int *numlist, size_t packetlen, |
4109 | unsigned int *maxsegments, int wait, int wantall, size_t wantsize) | |
91447636 | 4110 | { |
2d21ac55 A |
4111 | struct mbuf **np, *top, *first = NULL; |
4112 | size_t bufsize, r_bufsize; | |
4113 | unsigned int num = 0; | |
4114 | unsigned int nsegs = 0; | |
4115 | unsigned int needed, resid; | |
4116 | int mcflags = MSLEEPF(wait); | |
4117 | mcache_obj_t *mp_list = NULL, *rmp_list = NULL; | |
4118 | mcache_t *cp = NULL, *rcp = NULL; | |
4119 | ||
4120 | if (*numlist == 0) | |
4121 | return (NULL); | |
fa4905b1 | 4122 | |
91447636 A |
4123 | top = NULL; |
4124 | np = ⊤ | |
2d21ac55 | 4125 | |
91447636 | 4126 | if (wantsize == 0) { |
2d21ac55 | 4127 | if (packetlen <= MINCLSIZE) { |
91447636 | 4128 | bufsize = packetlen; |
2d21ac55 A |
4129 | } else if (packetlen > m_maxsize(MC_CL)) { |
4130 | /* Use 4KB if jumbo cluster pool isn't available */ | |
4131 | if (packetlen <= m_maxsize(MC_BIGCL) || njcl == 0) | |
4132 | bufsize = m_maxsize(MC_BIGCL); | |
4133 | else | |
4134 | bufsize = m_maxsize(MC_16KCL); | |
4135 | } else { | |
4136 | bufsize = m_maxsize(MC_CL); | |
4137 | } | |
4138 | } else if (wantsize == m_maxsize(MC_CL) || | |
4139 | wantsize == m_maxsize(MC_BIGCL) || | |
4140 | (wantsize == m_maxsize(MC_16KCL) && njcl > 0)) { | |
91447636 | 4141 | bufsize = wantsize; |
2d21ac55 A |
4142 | } else { |
4143 | return (NULL); | |
4144 | } | |
91447636 A |
4145 | |
4146 | if (bufsize <= MHLEN) { | |
2d21ac55 | 4147 | nsegs = 1; |
91447636 A |
4148 | } else if (bufsize <= MINCLSIZE) { |
4149 | if (maxsegments != NULL && *maxsegments == 1) { | |
2d21ac55 A |
4150 | bufsize = m_maxsize(MC_CL); |
4151 | nsegs = 1; | |
91447636 | 4152 | } else { |
2d21ac55 | 4153 | nsegs = 2; |
fa4905b1 | 4154 | } |
2d21ac55 A |
4155 | } else if (bufsize == m_maxsize(MC_16KCL)) { |
4156 | VERIFY(njcl > 0); | |
3e170ce0 | 4157 | nsegs = ((packetlen - 1) >> M16KCLSHIFT) + 1; |
2d21ac55 | 4158 | } else if (bufsize == m_maxsize(MC_BIGCL)) { |
3e170ce0 | 4159 | nsegs = ((packetlen - 1) >> MBIGCLSHIFT) + 1; |
91447636 | 4160 | } else { |
2d21ac55 | 4161 | nsegs = ((packetlen - 1) >> MCLSHIFT) + 1; |
91447636 A |
4162 | } |
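	/*
	 * Worked example (illustrative, not from the original source):
	 * packetlen = 5000 with 2KB clusters (MCLSHIFT == 11) gives
	 * nsegs = ((5000 - 1) >> 11) + 1 = 2 + 1 = 3, i.e. three
	 * clusters covering 6KB >= 5000 bytes.
	 */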
4163 | if (maxsegments != NULL) { | |
2d21ac55 A |
4164 | if (*maxsegments && nsegs > *maxsegments) { |
4165 | *maxsegments = nsegs; | |
4166 | return (NULL); | |
91447636 | 4167 | } |
2d21ac55 | 4168 | *maxsegments = nsegs; |
91447636 | 4169 | } |
91447636 | 4170 | |
2d21ac55 A |
4171 | /* |
4172 | * The caller doesn't want all the requested buffers; only some. | |
4173 | * Try hard to get what we can, but don't block. This effectively | |
4174 | * overrides MCR_SLEEP, since this thread will not go to sleep | |
4175 | * if we can't get all the buffers. | |
4176 | */ | |
4177 | if (!wantall || (mcflags & MCR_NOSLEEP)) | |
4178 | mcflags |= MCR_TRYHARD; | |
4179 | ||
4180 | /* | |
4181 | * Simple case where all elements in the lists/chains are mbufs. | |
4182 | * Unless bufsize is greater than MHLEN, each segment chain is made | |
4183 | * up of exactly 1 mbuf. Otherwise, each segment chain is made up | |
4184 | * of 2 mbufs; the second one is used for the residual data, i.e. | |
4185 | * the remaining data that cannot fit into the first mbuf. | |
4186 | */ | |
4187 | if (bufsize <= MINCLSIZE) { | |
4188 | /* Allocate the elements in one shot from the mbuf cache */ | |
4189 | ASSERT(bufsize <= MHLEN || nsegs == 2); | |
4190 | cp = m_cache(MC_MBUF); | |
4191 | needed = mcache_alloc_ext(cp, &mp_list, | |
4192 | (*numlist) * nsegs, mcflags); | |
4193 | ||
4194 | /* | |
4195 | * The number of elements must be even if we are to use an | |
4196 | * mbuf (instead of a cluster) to store the residual data. | |
4197 | * If we couldn't allocate the requested number of mbufs, | |
4198 | * trim the number down (if it's odd) in order to avoid | |
4199 | * creating a partial segment chain. | |
4200 | */ | |
4201 | if (bufsize > MHLEN && (needed & 0x1)) | |
4202 | needed--; | |
91447636 | 4203 | |
2d21ac55 A |
4204 | while (num < needed) { |
4205 | struct mbuf *m; | |
91447636 | 4206 | |
2d21ac55 A |
4207 | m = (struct mbuf *)mp_list; |
4208 | mp_list = mp_list->obj_next; | |
4209 | ASSERT(m != NULL); | |
91447636 | 4210 | |
2d21ac55 A |
4211 | MBUF_INIT(m, 1, MT_DATA); |
4212 | #if CONFIG_MACF_NET | |
4213 | if (mac_init_mbuf(m, wait) != 0) { | |
4214 | m_free(m); | |
4215 | break; | |
91447636 | 4216 | } |
2d21ac55 A |
4217 | #endif /* MAC_NET */ |
4218 | num++; | |
4219 | if (bufsize > MHLEN) { | |
4220 | /* A second mbuf for this segment chain */ | |
4221 | m->m_next = (struct mbuf *)mp_list; | |
4222 | mp_list = mp_list->obj_next; | |
4223 | ASSERT(m->m_next != NULL); | |
4224 | ||
4225 | MBUF_INIT(m->m_next, 0, MT_DATA); | |
4226 | num++; | |
91447636 | 4227 | } |
2d21ac55 A |
4228 | *np = m; |
4229 | np = &m->m_nextpkt; | |
4230 | } | |
4231 | ASSERT(num != *numlist || mp_list == NULL); | |
4232 | ||
4233 | if (num > 0) { | |
4234 | mtype_stat_add(MT_DATA, num); | |
4235 | mtype_stat_sub(MT_FREE, num); | |
4236 | } | |
4237 | num /= nsegs; | |
4238 | ||
4239 | /* We've got them all; return to caller */ | |
4240 | if (num == *numlist) | |
4241 | return (top); | |
4242 | ||
4243 | goto fail; | |
4244 | } | |
4245 | ||
4246 | /* | |
4247 | * Complex cases where elements are made up of one or more composite | |
4248 | * mbufs + cluster, depending on packetlen. Each N-segment chain can | |
4249 | * be illustrated as follows: | |
4250 | * | |
4251 | * [mbuf + cluster 1] [mbuf + cluster 2] ... [mbuf + cluster N] | |
4252 | * | |
4253 | * Every composite mbuf + cluster element comes from the intermediate | |
4254 | * cache (either MC_MBUF_CL or MC_MBUF_BIGCL). For space efficiency, | |
4255 | * the last composite element will come from the MC_MBUF_CL cache, | |
4256 | * unless the residual data is larger than 2KB where we use the | |
4257 | * big cluster composite cache (MC_MBUF_BIGCL) instead. Residual | |
4258 | * data is defined as extra data beyond the first element that cannot | |
4259 | * fit into the previous element, i.e. there is no residual data if | |
4260 | * the chain only has 1 segment. | |
4261 | */ | |
4262 | r_bufsize = bufsize; | |
4263 | resid = packetlen > bufsize ? packetlen % bufsize : 0; | |
4264 | if (resid > 0) { | |
4265 | /* There is residual data; figure out the cluster size */ | |
4266 | if (wantsize == 0 && packetlen > MINCLSIZE) { | |
4267 | /* | |
4268 | * Caller didn't request that all of the segments | |
4269 | * in the chain use the same cluster size; use the | |
4270 | * smaller of the cluster sizes. | |
4271 | */ | |
4272 | if (njcl > 0 && resid > m_maxsize(MC_BIGCL)) | |
4273 | r_bufsize = m_maxsize(MC_16KCL); | |
4274 | else if (resid > m_maxsize(MC_CL)) | |
4275 | r_bufsize = m_maxsize(MC_BIGCL); | |
4276 | else | |
4277 | r_bufsize = m_maxsize(MC_CL); | |
4278 | } else { | |
4279 | /* Use the same cluster size as the other segments */ | |
4280 | resid = 0; | |
4281 | } | |
4282 | } | |
4283 | ||
4284 | needed = *numlist; | |
4285 | if (resid > 0) { | |
4286 | /* | |
4287 | * Attempt to allocate composite mbuf + cluster elements for | |
4288 | * the residual data in each chain; record the number of such | |
4289 | * elements that can be allocated so that we know how many | |
4290 | * segment chains we can afford to create. | |
4291 | */ | |
4292 | if (r_bufsize <= m_maxsize(MC_CL)) | |
4293 | rcp = m_cache(MC_MBUF_CL); | |
4294 | else if (r_bufsize <= m_maxsize(MC_BIGCL)) | |
4295 | rcp = m_cache(MC_MBUF_BIGCL); | |
4296 | else | |
4297 | rcp = m_cache(MC_MBUF_16KCL); | |
4298 | needed = mcache_alloc_ext(rcp, &rmp_list, *numlist, mcflags); | |
4299 | ||
4300 | if (needed == 0) | |
4301 | goto fail; | |
4302 | ||
4303 | /* This is temporarily reduced for calculation */ | |
4304 | ASSERT(nsegs > 1); | |
4305 | nsegs--; | |
4306 | } | |
4307 | ||
4308 | /* | |
4309 | * Attempt to allocate the rest of the composite mbuf + cluster | |
4310 | * elements for the number of segment chains that we need. | |
4311 | */ | |
4312 | if (bufsize <= m_maxsize(MC_CL)) | |
4313 | cp = m_cache(MC_MBUF_CL); | |
4314 | else if (bufsize <= m_maxsize(MC_BIGCL)) | |
4315 | cp = m_cache(MC_MBUF_BIGCL); | |
4316 | else | |
4317 | cp = m_cache(MC_MBUF_16KCL); | |
4318 | needed = mcache_alloc_ext(cp, &mp_list, needed * nsegs, mcflags); | |
4319 | ||
4320 | /* Round it down to avoid creating a partial segment chain */ | |
4321 | needed = (needed / nsegs) * nsegs; | |
4322 | if (needed == 0) | |
4323 | goto fail; | |
4324 | ||
4325 | if (resid > 0) { | |
4326 | /* | |
4327 | * We're about to construct the chain(s); take into account | |
4328 | * the number of segments we have created above to hold the | |
4329 | * residual data for each chain, as well as restore the | |
4330 | * original count of segments per chain. | |
4331 | */ | |
4332 | ASSERT(nsegs > 0); | |
4333 | needed += needed / nsegs; | |
4334 | nsegs++; | |
4335 | } | |
4336 | ||
4337 | for (;;) { | |
4338 | struct mbuf *m; | |
39037602 | 4339 | u_int16_t flag; |
2d21ac55 A |
4340 | struct ext_ref *rfa; |
4341 | void *cl; | |
4342 | int pkthdr; | |
4343 | ||
4344 | ++num; | |
4345 | if (nsegs == 1 || (num % nsegs) != 0 || resid == 0) { | |
4346 | m = (struct mbuf *)mp_list; | |
4347 | mp_list = mp_list->obj_next; | |
4348 | } else { | |
4349 | m = (struct mbuf *)rmp_list; | |
4350 | rmp_list = rmp_list->obj_next; | |
4351 | } | |
4352 | ASSERT(m != NULL); | |
4353 | VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT); | |
4354 | VERIFY(m->m_ext.ext_free == NULL || | |
4355 | m->m_ext.ext_free == m_bigfree || | |
4356 | m->m_ext.ext_free == m_16kfree); | |
4357 | ||
4358 | cl = m->m_ext.ext_buf; | |
4359 | rfa = MEXT_RFA(m); | |
4360 | ||
4361 | ASSERT(cl != NULL && rfa != NULL); | |
4362 | VERIFY(MBUF_IS_COMPOSITE(m)); | |
4363 | ||
4364 | flag = MEXT_FLAGS(m); | |
4365 | ||
4366 | pkthdr = (nsegs == 1 || (num % nsegs) == 1); | |
4367 | if (pkthdr) | |
4368 | first = m; | |
4369 | MBUF_INIT(m, pkthdr, MT_DATA); | |
4370 | if (m->m_ext.ext_free == m_16kfree) { | |
4371 | MBUF_16KCL_INIT(m, cl, rfa, 1, flag); | |
4372 | } else if (m->m_ext.ext_free == m_bigfree) { | |
4373 | MBUF_BIGCL_INIT(m, cl, rfa, 1, flag); | |
4374 | } else { | |
4375 | MBUF_CL_INIT(m, cl, rfa, 1, flag); | |
4376 | } | |
4377 | #if CONFIG_MACF_NET | |
4378 | if (pkthdr && mac_init_mbuf(m, wait) != 0) { | |
4379 | --num; | |
6d2010ae | 4380 | m_freem(m); |
2d21ac55 | 4381 | break; |
91447636 | 4382 | } |
2d21ac55 A |
4383 | #endif /* MAC_NET */ |
4384 | ||
4385 | *np = m; | |
4386 | if ((num % nsegs) == 0) | |
4387 | np = &first->m_nextpkt; | |
4388 | else | |
4389 | np = &m->m_next; | |
4390 | ||
4391 | if (num == needed) | |
4392 | break; | |
4393 | } | |
4394 | ||
4395 | if (num > 0) { | |
4396 | mtype_stat_add(MT_DATA, num); | |
4397 | mtype_stat_sub(MT_FREE, num); | |
91447636 | 4398 | } |
2d21ac55 A |
4399 | |
4400 | num /= nsegs; | |
4401 | ||
4402 | /* We've got them all; return to caller */ | |
4403 | if (num == *numlist) { | |
4404 | ASSERT(mp_list == NULL && rmp_list == NULL); | |
4405 | return (top); | |
4406 | } | |
4407 | ||
91447636 | 4408 | fail: |
2d21ac55 A |
4409 | /* Free up what's left of the above */ |
4410 | if (mp_list != NULL) | |
4411 | mcache_free_ext(cp, mp_list); | |
4412 | if (rmp_list != NULL) | |
4413 | mcache_free_ext(rcp, rmp_list); | |
4414 | if (wantall && top != NULL) { | |
91447636 | 4415 | m_freem(top); |
2d21ac55 | 4416 | return (NULL); |
91447636 | 4417 | } |
2d21ac55 A |
4418 | *numlist = num; |
4419 | return (top); | |
91447636 | 4420 | } |
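
A minimal caller-side sketch of the routine above, assuming a hypothetical driver context (the 3000-byte length, the 32-chain batch and the give_to_ring() consumer are illustrative, not from this file):

    unsigned int num = 32;      /* ask for up to 32 packet chains */
    unsigned int maxsegs = 0;   /* 0 = no cap; set to segments-per-chain on return */
    struct mbuf *list, *m;

    list = m_allocpacket_internal(&num, 3000, &maxsegs, M_DONTWAIT, 0, 0);
    /* wantall == 0: on partial success, num is updated to what was built */
    while ((m = list) != NULL) {
        list = m->m_nextpkt;
        m->m_nextpkt = NULL;
        give_to_ring(m);        /* illustrative consumer */
    }

With wantall set instead, a short allocation frees everything and returns NULL, which suits callers that cannot use a partial batch.
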
fa4905b1 | 4421 | |
2d21ac55 A |
4422 | /* |
4423 | * Best effort to get a mbuf cluster + pkthdr. Used by drivers to allocate
4424 | * packets on the receive ring.
91447636 A |
4425 | */ |
4426 | __private_extern__ struct mbuf * | |
2d21ac55 | 4427 | m_getpacket_how(int wait) |
91447636 A |
4428 | { |
4429 | unsigned int num_needed = 1; | |
2d21ac55 A |
4430 | |
4431 | return (m_getpackets_internal(&num_needed, 1, wait, 1, | |
4432 | m_maxsize(MC_CL))); | |
91447636 | 4433 | } |
fa4905b1 | 4434 | |
2d21ac55 A |
4435 | /* |
4436 | * Best effort to get a mbuf cluster + pkthdr. Used by drivers to allocate
4437 | * packets on the receive ring.
91447636 A |
4438 | */ |
4439 | struct mbuf * | |
4440 | m_getpacket(void) | |
4441 | { | |
4442 | unsigned int num_needed = 1; | |
9bccf70c | 4443 | |
2d21ac55 A |
4444 | return (m_getpackets_internal(&num_needed, 1, M_WAIT, 1, |
4445 | m_maxsize(MC_CL))); | |
91447636 | 4446 | } |
fa4905b1 | 4447 | |
91447636 | 4448 | /* |
2d21ac55 A |
4449 | * Return a list of mbuf hdrs that point to clusters. Try for num_needed; |
4450 | * if this can't be met, return whatever number was available. Set up the
4451 | * first num_with_pkthdrs with mbuf hdrs configured as packet headers. These | |
4452 | * are chained on the m_nextpkt field. Any packets requested beyond this are | |
4453 | * chained onto the last packet header's m_next field. | |
91447636 A |
4454 | */ |
4455 | struct mbuf * | |
4456 | m_getpackets(int num_needed, int num_with_pkthdrs, int how) | |
4457 | { | |
4458 | unsigned int n = num_needed; | |
fa4905b1 | 4459 | |
2d21ac55 A |
4460 | return (m_getpackets_internal(&n, num_with_pkthdrs, how, 0, |
4461 | m_maxsize(MC_CL))); | |
4462 | } | |
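
A sketch of walking the returned layout, assuming caller-side code (not from this file): with num_needed equal to num_with_pkthdrs every element is a packet header on the m_nextpkt chain; with fewer packet headers, the surplus mbufs hang off the last header's m_next, per the comment above.

    struct mbuf *list, *pkt;

    list = m_getpackets(8, 8, M_DONTWAIT);  /* up to 8 full packet headers */
    for (pkt = list; pkt != NULL; pkt = pkt->m_nextpkt) {
        /* each element is an mbuf + 2K cluster with M_PKTHDR set */
    }
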
fa4905b1 | 4463 | |
9bccf70c | 4464 | /* |
2d21ac55 A |
4465 | * Return a list of mbuf hdrs set up as packet hdrs chained together |
4466 | * on the m_nextpkt field | |
9bccf70c | 4467 | */ |
fa4905b1 A |
4468 | struct mbuf * |
4469 | m_getpackethdrs(int num_needed, int how) | |
4470 | { | |
4471 | struct mbuf *m; | |
4472 | struct mbuf **np, *top; | |
4473 | ||
4474 | top = NULL; | |
4475 | np = &top; | |
4476 | ||
fa4905b1 | 4477 | while (num_needed--) { |
2d21ac55 A |
4478 | m = _M_RETRYHDR(how, MT_DATA); |
4479 | if (m == NULL) | |
4480 | break; | |
4481 | ||
4482 | *np = m; | |
4483 | np = &m->m_nextpkt; | |
4484 | } | |
fa4905b1 A |
4485 | |
4486 | return (top); | |
4487 | } | |
4488 | ||
2d21ac55 A |
4489 | /* |
4490 | * Free an mbuf list (m_nextpkt) while following m_next. Returns the count
4491 | * of mbuf packets freed. Used by the drivers.
1c79356b | 4492 | */ |
2d21ac55 A |
4493 | int |
4494 | m_freem_list(struct mbuf *m) | |
1c79356b A |
4495 | { |
4496 | struct mbuf *nextpkt; | |
2d21ac55 A |
4497 | mcache_obj_t *mp_list = NULL; |
4498 | mcache_obj_t *mcl_list = NULL; | |
4499 | mcache_obj_t *mbc_list = NULL; | |
4500 | mcache_obj_t *m16k_list = NULL; | |
4501 | mcache_obj_t *m_mcl_list = NULL; | |
4502 | mcache_obj_t *m_mbc_list = NULL; | |
4503 | mcache_obj_t *m_m16k_list = NULL; | |
4504 | mcache_obj_t *ref_list = NULL; | |
4505 | int pktcount = 0; | |
4506 | int mt_free = 0, mt_data = 0, mt_header = 0, mt_soname = 0, mt_tag = 0; | |
4507 | ||
4508 | while (m != NULL) { | |
4509 | pktcount++; | |
4510 | ||
4511 | nextpkt = m->m_nextpkt; | |
4512 | m->m_nextpkt = NULL; | |
4513 | ||
4514 | while (m != NULL) { | |
4515 | struct mbuf *next = m->m_next; | |
4516 | mcache_obj_t *o, *rfa; | |
39037602 A |
4517 | u_int32_t composite; |
4518 | u_int16_t refcnt; | |
fa4905b1 | 4519 | |
2d21ac55 A |
4520 | if (m->m_type == MT_FREE) |
4521 | panic("m_free: freeing an already freed mbuf"); | |
9bccf70c | 4522 | |
2d21ac55 | 4523 | if (m->m_flags & M_PKTHDR) { |
39236c6e A |
4524 | /* Check for scratch area overflow */ |
4525 | m_redzone_verify(m); | |
4526 | /* Free the aux data and tags if there is any */ | |
91447636 | 4527 | m_tag_delete_chain(m, NULL); |
91447636 | 4528 | } |
9bccf70c | 4529 | |
39037602 A |
4530 | if (!(m->m_flags & M_EXT)) { |
4531 | mt_free++; | |
2d21ac55 | 4532 | goto simple_free; |
39037602 A |
4533 | } |
4534 | ||
4535 | if (MBUF_IS_PAIRED(m) && m_free_paired(m)) { | |
4536 | m = next; | |
4537 | continue; | |
4538 | } | |
4539 | ||
4540 | mt_free++; | |
2d21ac55 | 4541 | |
316670eb | 4542 | o = (mcache_obj_t *)(void *)m->m_ext.ext_buf; |
2d21ac55 | 4543 | refcnt = m_decref(m); |
6d2010ae | 4544 | composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE); |
39037602 A |
4545 | |
4546 | if (refcnt == MEXT_MINREF(m) && !composite) { | |
2d21ac55 A |
4547 | if (m->m_ext.ext_free == NULL) { |
4548 | o->obj_next = mcl_list; | |
4549 | mcl_list = o; | |
4550 | } else if (m->m_ext.ext_free == m_bigfree) { | |
4551 | o->obj_next = mbc_list; | |
4552 | mbc_list = o; | |
4553 | } else if (m->m_ext.ext_free == m_16kfree) { | |
4554 | o->obj_next = m16k_list; | |
4555 | m16k_list = o; | |
4556 | } else { | |
4557 | (*(m->m_ext.ext_free))((caddr_t)o, | |
4558 | m->m_ext.ext_size, | |
4559 | m->m_ext.ext_arg); | |
4560 | } | |
316670eb | 4561 | rfa = (mcache_obj_t *)(void *)MEXT_RFA(m); |
2d21ac55 A |
4562 | rfa->obj_next = ref_list; |
4563 | ref_list = rfa; | |
4564 | MEXT_RFA(m) = NULL; | |
39037602 A |
4565 | } else if (refcnt == MEXT_MINREF(m) && composite) { |
4566 | VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED)); | |
2d21ac55 A |
4567 | VERIFY(m->m_type != MT_FREE); |
4568 | /* | |
4569 | * Amortize the costs of atomic operations | |
4570 | * by doing them at the end, if possible. | |
4571 | */ | |
4572 | if (m->m_type == MT_DATA) | |
4573 | mt_data++; | |
4574 | else if (m->m_type == MT_HEADER) | |
4575 | mt_header++; | |
4576 | else if (m->m_type == MT_SONAME) | |
4577 | mt_soname++; | |
4578 | else if (m->m_type == MT_TAG) | |
4579 | mt_tag++; | |
4580 | else | |
4581 | mtype_stat_dec(m->m_type); | |
fa4905b1 | 4582 | |
2d21ac55 A |
4583 | m->m_type = MT_FREE; |
4584 | m->m_flags = M_EXT; | |
4585 | m->m_len = 0; | |
4586 | m->m_next = m->m_nextpkt = NULL; | |
4587 | ||
6d2010ae A |
4588 | MEXT_FLAGS(m) &= ~EXTF_READONLY; |
4589 | ||
2d21ac55 A |
4590 | /* "Free" into the intermediate cache */ |
4591 | o = (mcache_obj_t *)m; | |
4592 | if (m->m_ext.ext_free == NULL) { | |
4593 | o->obj_next = m_mcl_list; | |
4594 | m_mcl_list = o; | |
4595 | } else if (m->m_ext.ext_free == m_bigfree) { | |
4596 | o->obj_next = m_mbc_list; | |
4597 | m_mbc_list = o; | |
1c79356b | 4598 | } else { |
2d21ac55 A |
4599 | VERIFY(m->m_ext.ext_free == m_16kfree); |
4600 | o->obj_next = m_m16k_list; | |
4601 | m_m16k_list = o; | |
1c79356b | 4602 | } |
2d21ac55 A |
4603 | m = next; |
4604 | continue; | |
1c79356b | 4605 | } |
2d21ac55 A |
4606 | simple_free: |
4607 | /* | |
4608 | * Amortize the costs of atomic operations | |
4609 | * by doing them at the end, if possible. | |
4610 | */ | |
4611 | if (m->m_type == MT_DATA) | |
4612 | mt_data++; | |
4613 | else if (m->m_type == MT_HEADER) | |
4614 | mt_header++; | |
4615 | else if (m->m_type == MT_SONAME) | |
4616 | mt_soname++; | |
4617 | else if (m->m_type == MT_TAG) | |
4618 | mt_tag++; | |
4619 | else if (m->m_type != MT_FREE) | |
4620 | mtype_stat_dec(m->m_type); | |
4621 | ||
1c79356b | 4622 | m->m_type = MT_FREE; |
2d21ac55 A |
4623 | m->m_flags = m->m_len = 0; |
4624 | m->m_next = m->m_nextpkt = NULL; | |
fa4905b1 | 4625 | |
2d21ac55 A |
4626 | ((mcache_obj_t *)m)->obj_next = mp_list; |
4627 | mp_list = (mcache_obj_t *)m; | |
4628 | ||
4629 | m = next; | |
4630 | } | |
fa4905b1 | 4631 | |
2d21ac55 A |
4632 | m = nextpkt; |
4633 | } | |
fa4905b1 | 4634 | |
2d21ac55 A |
4635 | if (mt_free > 0) |
4636 | mtype_stat_add(MT_FREE, mt_free); | |
4637 | if (mt_data > 0) | |
4638 | mtype_stat_sub(MT_DATA, mt_data); | |
4639 | if (mt_header > 0) | |
4640 | mtype_stat_sub(MT_HEADER, mt_header); | |
4641 | if (mt_soname > 0) | |
4642 | mtype_stat_sub(MT_SONAME, mt_soname); | |
4643 | if (mt_tag > 0) | |
4644 | mtype_stat_sub(MT_TAG, mt_tag); | |
4645 | ||
4646 | if (mp_list != NULL) | |
4647 | mcache_free_ext(m_cache(MC_MBUF), mp_list); | |
4648 | if (mcl_list != NULL) | |
4649 | mcache_free_ext(m_cache(MC_CL), mcl_list); | |
4650 | if (mbc_list != NULL) | |
4651 | mcache_free_ext(m_cache(MC_BIGCL), mbc_list); | |
4652 | if (m16k_list != NULL) | |
4653 | mcache_free_ext(m_cache(MC_16KCL), m16k_list); | |
4654 | if (m_mcl_list != NULL) | |
4655 | mcache_free_ext(m_cache(MC_MBUF_CL), m_mcl_list); | |
4656 | if (m_mbc_list != NULL) | |
4657 | mcache_free_ext(m_cache(MC_MBUF_BIGCL), m_mbc_list); | |
4658 | if (m_m16k_list != NULL) | |
4659 | mcache_free_ext(m_cache(MC_MBUF_16KCL), m_m16k_list); | |
4660 | if (ref_list != NULL) | |
4661 | mcache_free_ext(ref_cache, ref_list); | |
4662 | ||
4663 | return (pktcount); | |
1c79356b A |
4664 | } |
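
A caller-side sketch, assuming a hypothetical transmit-completion path: batching completed packets onto one m_nextpkt list and freeing them together lets the mtype_stat_* updates above run once per batch instead of once per mbuf.

    struct mbuf *done = NULL, **tailp = &done;

    /* as each completed packet pkt is collected: */
    *tailp = pkt;
    tailp = &pkt->m_nextpkt;

    /* then, once per batch: */
    (void) m_freem_list(done);  /* returns the number of packets freed */
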
4665 | ||
4666 | void | |
2d21ac55 | 4667 | m_freem(struct mbuf *m) |
1c79356b | 4668 | { |
2d21ac55 | 4669 | while (m != NULL) |
1c79356b A |
4670 | m = m_free(m); |
4671 | } | |
4672 | ||
4673 | /* | |
4674 | * Mbuffer utility routines. | |
4675 | */ | |
2d21ac55 | 4676 | |
1c79356b | 4677 | /* |
2d21ac55 A |
4678 | * Compute the amount of space available before the current start |
4679 | * of data in an mbuf. | |
1c79356b | 4680 | */ |
91447636 | 4681 | int |
2d21ac55 | 4682 | m_leadingspace(struct mbuf *m) |
1c79356b A |
4683 | { |
4684 | if (m->m_flags & M_EXT) { | |
4685 | if (MCLHASREFERENCE(m)) | |
2d21ac55 | 4686 | return (0); |
1c79356b A |
4687 | return (m->m_data - m->m_ext.ext_buf); |
4688 | } | |
4689 | if (m->m_flags & M_PKTHDR) | |
4690 | return (m->m_data - m->m_pktdat); | |
4691 | return (m->m_data - m->m_dat); | |
4692 | } | |
4693 | ||
4694 | /* | |
2d21ac55 | 4695 | * Compute the amount of space available after the end of data in an mbuf. |
1c79356b | 4696 | */ |
91447636 | 4697 | int |
2d21ac55 | 4698 | m_trailingspace(struct mbuf *m) |
1c79356b A |
4699 | { |
4700 | if (m->m_flags & M_EXT) { | |
4701 | if (MCLHASREFERENCE(m)) | |
2d21ac55 | 4702 | return (0); |
1c79356b | 4703 | return (m->m_ext.ext_buf + m->m_ext.ext_size - |
2d21ac55 | 4704 | (m->m_data + m->m_len)); |
1c79356b A |
4705 | } |
4706 | return (&m->m_dat[MLEN] - (m->m_data + m->m_len)); | |
4707 | } | |
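
A minimal sketch of the usual pattern, assuming caller-side code (src and len are illustrative): append in place only when the mbuf reports room; since both routines return 0 for a referenced cluster, the same check also guards against writing into shared data.

    if (m_trailingspace(m) >= len) {
        bcopy(src, mtod(m, caddr_t) + m->m_len, len);
        m->m_len += len;
        if (m->m_flags & M_PKTHDR)
            m->m_pkthdr.len += len;
    }
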
4708 | ||
4709 | /* | |
2d21ac55 A |
4710 | * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain, |
4711 | * copy junk along. Does not adjust packet header length. | |
1c79356b A |
4712 | */ |
4713 | struct mbuf * | |
2d21ac55 | 4714 | m_prepend(struct mbuf *m, int len, int how) |
1c79356b A |
4715 | { |
4716 | struct mbuf *mn; | |
4717 | ||
2d21ac55 A |
4718 | _MGET(mn, how, m->m_type); |
4719 | if (mn == NULL) { | |
1c79356b | 4720 | m_freem(m); |
2d21ac55 | 4721 | return (NULL); |
1c79356b A |
4722 | } |
4723 | if (m->m_flags & M_PKTHDR) { | |
4724 | M_COPY_PKTHDR(mn, m); | |
4725 | m->m_flags &= ~M_PKTHDR; | |
4726 | } | |
4727 | mn->m_next = m; | |
4728 | m = mn; | |
3e170ce0 A |
4729 | if (m->m_flags & M_PKTHDR) { |
4730 | VERIFY(len <= MHLEN); | |
1c79356b | 4731 | MH_ALIGN(m, len); |
3e170ce0 A |
4732 | } else { |
4733 | VERIFY(len <= MLEN); | |
4734 | M_ALIGN(m, len); | |
4735 | } | |
1c79356b A |
4736 | m->m_len = len; |
4737 | return (m); | |
4738 | } | |
4739 | ||
9bccf70c | 4740 | /* |
2d21ac55 A |
4741 | * Replacement for old M_PREPEND macro: allocate new mbuf to prepend to |
4742 | * chain, copy junk along, and adjust length. | |
9bccf70c A |
4743 | */ |
4744 | struct mbuf * | |
3e170ce0 | 4745 | m_prepend_2(struct mbuf *m, int len, int how, int align) |
2d21ac55 | 4746 | { |
3e170ce0 A |
4747 | if (M_LEADINGSPACE(m) >= len && |
4748 | (!align || IS_P2ALIGNED((m->m_data - len), sizeof(u_int32_t)))) { | |
2d21ac55 A |
4749 | m->m_data -= len; |
4750 | m->m_len += len; | |
4751 | } else { | |
9bccf70c | 4752 | m = m_prepend(m, len, how); |
2d21ac55 A |
4753 | } |
4754 | if ((m) && (m->m_flags & M_PKTHDR)) | |
4755 | m->m_pkthdr.len += len; | |
4756 | return (m); | |
9bccf70c A |
4757 | } |
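
Illustrative use, assuming a hypothetical link layer (eh holds the 14 header bytes): reuse leading space when it is present and aligned, falling back to m_prepend() otherwise; note the chain has already been freed when NULL comes back.

    m = m_prepend_2(m, 14, M_DONTWAIT, 0);
    if (m == NULL)
        return;                         /* chain was freed on failure */
    bcopy(&eh, mtod(m, caddr_t), 14);   /* eh: header bytes, illustrative */
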
4758 | ||
1c79356b A |
4759 | /* |
4760 | * Make a copy of an mbuf chain starting "off0" bytes from the beginning, | |
4761 | * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. | |
4762 | * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller. | |
4763 | */ | |
4764 | int MCFail; | |
4765 | ||
4766 | struct mbuf * | |
39236c6e | 4767 | m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode) |
1c79356b | 4768 | { |
2d21ac55 | 4769 | struct mbuf *n, *mhdr = NULL, **np; |
91447636 | 4770 | int off = off0; |
1c79356b A |
4771 | struct mbuf *top; |
4772 | int copyhdr = 0; | |
4773 | ||
4774 | if (off < 0 || len < 0) | |
2d21ac55 A |
4775 | panic("m_copym: invalid offset %d or len %d", off, len); |
4776 | ||
fe8ab488 A |
4777 | VERIFY((mode != M_COPYM_MUST_COPY_HDR && |
4778 | mode != M_COPYM_MUST_MOVE_HDR) || (m->m_flags & M_PKTHDR)); | |
4779 | ||
4780 | if ((off == 0 && (m->m_flags & M_PKTHDR)) || | |
4781 | mode == M_COPYM_MUST_COPY_HDR || mode == M_COPYM_MUST_MOVE_HDR) { | |
2d21ac55 | 4782 | mhdr = m; |
1c79356b | 4783 | copyhdr = 1; |
2d21ac55 | 4784 | } |
fa4905b1 A |
4785 | |
4786 | while (off >= m->m_len) { | |
2d21ac55 A |
4787 | if (m->m_next == NULL) |
4788 | panic("m_copym: invalid mbuf chain"); | |
1c79356b A |
4789 | off -= m->m_len; |
4790 | m = m->m_next; | |
4791 | } | |
4792 | np = &top; | |
2d21ac55 | 4793 | top = NULL; |
fa4905b1 | 4794 | |
1c79356b | 4795 | while (len > 0) { |
2d21ac55 | 4796 | if (m == NULL) { |
1c79356b | 4797 | if (len != M_COPYALL) |
2d21ac55 | 4798 | panic("m_copym: len != M_COPYALL"); |
1c79356b A |
4799 | break; |
4800 | } | |
2d21ac55 | 4801 | |
fe8ab488 A |
4802 | if (copyhdr) |
4803 | n = _M_RETRYHDR(wait, m->m_type); | |
4804 | else | |
4805 | n = _M_RETRY(wait, m->m_type); | |
1c79356b | 4806 | *np = n; |
fa4905b1 | 4807 | |
2d21ac55 | 4808 | if (n == NULL) |
1c79356b | 4809 | goto nospace; |
2d21ac55 A |
4810 | |
4811 | if (copyhdr != 0) { | |
fe8ab488 A |
4812 | if ((mode == M_COPYM_MOVE_HDR) || |
4813 | (mode == M_COPYM_MUST_MOVE_HDR)) { | |
39236c6e | 4814 | M_COPY_PKTHDR(n, mhdr); |
fe8ab488 A |
4815 | } else if ((mode == M_COPYM_COPY_HDR) || |
4816 | (mode == M_COPYM_MUST_COPY_HDR)) { | |
39236c6e A |
4817 | if (m_dup_pkthdr(n, mhdr, wait) == 0) |
4818 | goto nospace; | |
4819 | } | |
1c79356b A |
4820 | if (len == M_COPYALL) |
4821 | n->m_pkthdr.len -= off0; | |
4822 | else | |
4823 | n->m_pkthdr.len = len; | |
4824 | copyhdr = 0; | |
fe8ab488 A |
4825 | /* |
4826 | * There is data to copy from the packet header mbuf | |
4827 | * if it is empty or it is before the starting offset | |
4828 | */ | |
4829 | if (mhdr != m) { | |
4830 | np = &n->m_next; | |
4831 | continue; | |
2d21ac55 | 4832 | } |
1c79356b | 4833 | } |
2d21ac55 | 4834 | n->m_len = MIN(len, (m->m_len - off)); |
1c79356b | 4835 | if (m->m_flags & M_EXT) { |
1c79356b | 4836 | n->m_ext = m->m_ext; |
2d21ac55 | 4837 | m_incref(m); |
1c79356b A |
4838 | n->m_data = m->m_data + off; |
4839 | n->m_flags |= M_EXT; | |
fa4905b1 | 4840 | } else { |
fe8ab488 A |
4841 | /* |
4842 | * Limit to the capacity of the destination | |
4843 | */ | |
4844 | if (n->m_flags & M_PKTHDR) | |
4845 | n->m_len = MIN(n->m_len, MHLEN); | |
4846 | else | |
4847 | n->m_len = MIN(n->m_len, MLEN); | |
4848 | ||
4849 | if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) | |
39037602 | 4850 | panic("%s n %p copy overflow", |
fe8ab488 A |
4851 | __func__, n); |
4852 | ||
2d21ac55 | 4853 | bcopy(MTOD(m, caddr_t)+off, MTOD(n, caddr_t), |
1c79356b | 4854 | (unsigned)n->m_len); |
fa4905b1 | 4855 | } |
1c79356b A |
4856 | if (len != M_COPYALL) |
4857 | len -= n->m_len; | |
4858 | off = 0; | |
4859 | m = m->m_next; | |
4860 | np = &n->m_next; | |
4861 | } | |
fa4905b1 | 4862 | |
2d21ac55 | 4863 | if (top == NULL) |
1c79356b | 4864 | MCFail++; |
fa4905b1 | 4865 | |
1c79356b A |
4866 | return (top); |
4867 | nospace: | |
fa4905b1 | 4868 | |
1c79356b A |
4869 | m_freem(top); |
4870 | MCFail++; | |
2d21ac55 | 4871 | return (NULL); |
1c79356b A |
4872 | } |
4873 | ||
39236c6e A |
4874 | |
4875 | struct mbuf * | |
4876 | m_copym(struct mbuf *m, int off0, int len, int wait) | |
4877 | { | |
4878 | return (m_copym_mode(m, off0, len, wait, M_COPYM_MOVE_HDR)); | |
4879 | } | |
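
Since cluster-backed data is shared by reference (the m_incref() in m_copym_mode() above) rather than copied, a caller can take a cheap snapshot of the leading bytes of a packet; a minimal sketch:

    struct mbuf *hdr;

    hdr = m_copym(pkt, 0, 64, M_DONTWAIT);  /* first 64 bytes of pkt */
    if (hdr == NULL)
        return;                 /* copy failed; pkt itself is untouched */
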
4880 | ||
9bccf70c | 4881 | /* |
2d21ac55 A |
4882 | * Equivalent to m_copym, except that all necessary mbuf hdrs are allocated
4883 | * within this routine. Also, the last mbuf and offset accessed are passed
4884 | * out and can be passed back in to avoid having to rescan the entire mbuf
4885 | * list (normally hung off of the socket).
9bccf70c | 4886 | */ |
fa4905b1 | 4887 | struct mbuf * |
fe8ab488 | 4888 | m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait, |
39236c6e | 4889 | struct mbuf **m_lastm, int *m_off, uint32_t mode) |
2d21ac55 | 4890 | { |
fe8ab488 | 4891 | struct mbuf *m = m0, *n, **np = NULL; |
2d21ac55 A |
4892 | int off = off0, len = len0; |
4893 | struct mbuf *top = NULL; | |
4894 | int mcflags = MSLEEPF(wait); | |
fa4905b1 | 4895 | int copyhdr = 0; |
2d21ac55 A |
4896 | int type = 0; |
4897 | mcache_obj_t *list = NULL; | |
4898 | int needed = 0; | |
fa4905b1 | 4899 | |
2d21ac55 | 4900 | if (off == 0 && (m->m_flags & M_PKTHDR)) |
fa4905b1 | 4901 | copyhdr = 1; |
39037602 | 4902 | |
fe8ab488 | 4903 | if (m_lastm != NULL && *m_lastm != NULL) { |
6d2010ae | 4904 | m = *m_lastm; |
fa4905b1 A |
4905 | off = *m_off; |
4906 | } else { | |
2d21ac55 A |
4907 | while (off >= m->m_len) { |
4908 | off -= m->m_len; | |
fa4905b1 A |
4909 | m = m->m_next; |
4910 | } | |
4911 | } | |
91447636 | 4912 | |
2d21ac55 A |
4913 | n = m; |
4914 | while (len > 0) { | |
4915 | needed++; | |
4916 | ASSERT(n != NULL); | |
4917 | len -= MIN(len, (n->m_len - ((needed == 1) ? off : 0))); | |
4918 | n = n->m_next; | |
4919 | } | |
4920 | needed++; | |
4921 | len = len0; | |
4922 | ||
4923 | /* | |
4924 | * If the caller doesn't want to be put to sleep, mark it with | |
4925 | * MCR_TRYHARD so that we may reclaim buffers from other places | |
4926 | * before giving up. | |
4927 | */ | |
4928 | if (mcflags & MCR_NOSLEEP) | |
4929 | mcflags |= MCR_TRYHARD; | |
4930 | ||
4931 | if (mcache_alloc_ext(m_cache(MC_MBUF), &list, needed, | |
4932 | mcflags) != needed) | |
4933 | goto nospace; | |
fa4905b1 | 4934 | |
2d21ac55 | 4935 | needed = 0; |
fa4905b1 | 4936 | while (len > 0) { |
2d21ac55 A |
4937 | n = (struct mbuf *)list; |
4938 | list = list->obj_next; | |
4939 | ASSERT(n != NULL && m != NULL); | |
4940 | ||
4941 | type = (top == NULL) ? MT_HEADER : m->m_type; | |
4942 | MBUF_INIT(n, (top == NULL), type); | |
4943 | #if CONFIG_MACF_NET | |
4944 | if (top == NULL && mac_mbuf_label_init(n, wait) != 0) { | |
4945 | mtype_stat_inc(MT_HEADER); | |
4946 | mtype_stat_dec(MT_FREE); | |
4947 | m_free(n); | |
fa4905b1 | 4948 | goto nospace; |
2d21ac55 A |
4949 | } |
4950 | #endif /* MAC_NET */ | |
4951 | ||
4952 | if (top == NULL) { | |
4953 | top = n; | |
fa4905b1 A |
4954 | np = &top->m_next; |
4955 | continue; | |
2d21ac55 A |
4956 | } else { |
4957 | needed++; | |
4958 | *np = n; | |
4959 | } | |
fa4905b1 A |
4960 | |
4961 | if (copyhdr) { | |
fe8ab488 A |
4962 | if ((mode == M_COPYM_MOVE_HDR) || |
4963 | (mode == M_COPYM_MUST_MOVE_HDR)) { | |
39236c6e | 4964 | M_COPY_PKTHDR(n, m); |
fe8ab488 A |
4965 | } else if ((mode == M_COPYM_COPY_HDR) || |
4966 | (mode == M_COPYM_MUST_COPY_HDR)) { | |
39236c6e A |
4967 | if (m_dup_pkthdr(n, m, wait) == 0) |
4968 | goto nospace; | |
4969 | } | |
fa4905b1 A |
4970 | n->m_pkthdr.len = len; |
4971 | copyhdr = 0; | |
4972 | } | |
2d21ac55 | 4973 | n->m_len = MIN(len, (m->m_len - off)); |
fa4905b1 A |
4974 | |
4975 | if (m->m_flags & M_EXT) { | |
4976 | n->m_ext = m->m_ext; | |
2d21ac55 | 4977 | m_incref(m); |
fa4905b1 A |
4978 | n->m_data = m->m_data + off; |
4979 | n->m_flags |= M_EXT; | |
4980 | } else { | |
fe8ab488 | 4981 | if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) |
39037602 | 4982 | panic("%s n %p copy overflow", |
fe8ab488 A |
4983 | __func__, n); |
4984 | ||
2d21ac55 | 4985 | bcopy(MTOD(m, caddr_t)+off, MTOD(n, caddr_t), |
fa4905b1 A |
4986 | (unsigned)n->m_len); |
4987 | } | |
4988 | len -= n->m_len; | |
2d21ac55 | 4989 | |
fa4905b1 | 4990 | if (len == 0) { |
fe8ab488 A |
4991 | if (m_lastm != NULL && m_off != NULL) { |
4992 | if ((off + n->m_len) == m->m_len) { | |
4993 | *m_lastm = m->m_next; | |
4994 | *m_off = 0; | |
4995 | } else { | |
4996 | *m_lastm = m; | |
4997 | *m_off = off + n->m_len; | |
4998 | } | |
fa4905b1 | 4999 | } |
2d21ac55 | 5000 | break; |
fa4905b1 A |
5001 | } |
5002 | off = 0; | |
5003 | m = m->m_next; | |
5004 | np = &n->m_next; | |
5005 | } | |
fa4905b1 | 5006 | |
2d21ac55 A |
5007 | mtype_stat_inc(MT_HEADER); |
5008 | mtype_stat_add(type, needed); | |
5009 | mtype_stat_sub(MT_FREE, needed + 1); | |
5010 | ||
5011 | ASSERT(list == NULL); | |
fa4905b1 | 5012 | return (top); |
fa4905b1 | 5013 | |
2d21ac55 A |
5014 | nospace: |
5015 | if (list != NULL) | |
5016 | mcache_free_ext(m_cache(MC_MBUF), list); | |
5017 | if (top != NULL) | |
5018 | m_freem(top); | |
fa4905b1 | 5019 | MCFail++; |
2d21ac55 | 5020 | return (NULL); |
fa4905b1 A |
5021 | } |
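
A sketch of the resume-state idea, assuming a hypothetical sender draining a socket buffer in mss-sized pieces (sb_mb, mss and total_len are illustrative): the m_lastm/m_off cookies written by one call seed the next, so each call resumes where the previous copy stopped instead of rescanning from the head.

    struct mbuf *last = NULL, *seg;
    int saved_off = 0, off = 0;

    while (off < total_len) {
        seg = m_copym_with_hdrs(sb_mb, off, mss, M_DONTWAIT,
            &last, &saved_off, M_COPYM_MOVE_HDR);
        if (seg == NULL)
            break;
        /* hand seg to the output path ... */
        off += mss;
    }
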
5022 | ||
1c79356b A |
5023 | /* |
5024 | * Copy data from an mbuf chain starting "off" bytes from the beginning, | |
5025 | * continuing for "len" bytes, into the indicated buffer. | |
5026 | */ | |
2d21ac55 | 5027 | void |
b0d623f7 | 5028 | m_copydata(struct mbuf *m, int off, int len, void *vp) |
1c79356b | 5029 | { |
91447636 | 5030 | unsigned count; |
b0d623f7 | 5031 | char *cp = vp; |
1c79356b A |
5032 | |
5033 | if (off < 0 || len < 0) | |
2d21ac55 A |
5034 | panic("m_copydata: invalid offset %d or len %d", off, len); |
5035 | ||
1c79356b | 5036 | while (off > 0) { |
2d21ac55 A |
5037 | if (m == NULL) |
5038 | panic("m_copydata: invalid mbuf chain"); | |
1c79356b A |
5039 | if (off < m->m_len) |
5040 | break; | |
5041 | off -= m->m_len; | |
5042 | m = m->m_next; | |
5043 | } | |
5044 | while (len > 0) { | |
2d21ac55 A |
5045 | if (m == NULL) |
5046 | panic("m_copydata: invalid mbuf chain"); | |
5047 | count = MIN(m->m_len - off, len); | |
5048 | bcopy(MTOD(m, caddr_t) + off, cp, count); | |
1c79356b A |
5049 | len -= count; |
5050 | cp += count; | |
5051 | off = 0; | |
5052 | m = m->m_next; | |
5053 | } | |
5054 | } | |
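
A common pattern built on this, assuming caller-side code (hdr_off is illustrative): linearize a fixed-size header into a local buffer when it may straddle mbufs, without touching the chain itself. The caller must know the chain really holds the requested range, since the routine panics on a short chain.

    struct tcphdr th;           /* assumes <netinet/tcp.h> */

    m_copydata(m, hdr_off, sizeof (th), &th);
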
5055 | ||
5056 | /* | |
2d21ac55 A |
5057 | * Concatenate mbuf chain n to m. Both chains must be of the same type |
5058 | * (e.g. MT_DATA). Any m_pkthdr is not updated. | |
1c79356b | 5059 | */ |
2d21ac55 A |
5060 | void |
5061 | m_cat(struct mbuf *m, struct mbuf *n) | |
1c79356b A |
5062 | { |
5063 | while (m->m_next) | |
5064 | m = m->m_next; | |
5065 | while (n) { | |
2d21ac55 | 5066 | if ((m->m_flags & M_EXT) || |
1c79356b A |
5067 | m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) { |
5068 | /* just join the two chains */ | |
5069 | m->m_next = n; | |
5070 | return; | |
5071 | } | |
5072 | /* splat the data from one into the other */ | |
2d21ac55 | 5073 | bcopy(MTOD(n, caddr_t), MTOD(m, caddr_t) + m->m_len, |
1c79356b A |
5074 | (u_int)n->m_len); |
5075 | m->m_len += n->m_len; | |
5076 | n = m_free(n); | |
5077 | } | |
5078 | } | |
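
Illustrative use: append one data chain to another. As the comment above notes, m_pkthdr is not updated, so a packet-header chain needs a manual fix-up, with the length of n measured before m_cat() may free parts of it.

    int n_len = m_length(n);    /* m_length() is defined later in this file */

    m_cat(m, n);
    if (m->m_flags & M_PKTHDR)
        m->m_pkthdr.len += n_len;
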
5079 | ||
5080 | void | |
2d21ac55 | 5081 | m_adj(struct mbuf *mp, int req_len) |
1c79356b | 5082 | { |
91447636 A |
5083 | int len = req_len; |
5084 | struct mbuf *m; | |
5085 | int count; | |
1c79356b A |
5086 | |
5087 | if ((m = mp) == NULL) | |
5088 | return; | |
5089 | if (len >= 0) { | |
5090 | /* | |
5091 | * Trim from head. | |
5092 | */ | |
5093 | while (m != NULL && len > 0) { | |
5094 | if (m->m_len <= len) { | |
5095 | len -= m->m_len; | |
5096 | m->m_len = 0; | |
5097 | m = m->m_next; | |
5098 | } else { | |
5099 | m->m_len -= len; | |
5100 | m->m_data += len; | |
5101 | len = 0; | |
5102 | } | |
5103 | } | |
5104 | m = mp; | |
5105 | if (m->m_flags & M_PKTHDR) | |
5106 | m->m_pkthdr.len -= (req_len - len); | |
5107 | } else { | |
5108 | /* | |
5109 | * Trim from tail. Scan the mbuf chain, | |
5110 | * calculating its length and finding the last mbuf. | |
5111 | * If the adjustment only affects this mbuf, then just | |
5112 | * adjust and return. Otherwise, rescan and truncate | |
5113 | * after the remaining size. | |
5114 | */ | |
5115 | len = -len; | |
5116 | count = 0; | |
5117 | for (;;) { | |
5118 | count += m->m_len; | |
5119 | if (m->m_next == (struct mbuf *)0) | |
5120 | break; | |
5121 | m = m->m_next; | |
5122 | } | |
5123 | if (m->m_len >= len) { | |
5124 | m->m_len -= len; | |
5125 | m = mp; | |
5126 | if (m->m_flags & M_PKTHDR) | |
5127 | m->m_pkthdr.len -= len; | |
5128 | return; | |
5129 | } | |
5130 | count -= len; | |
5131 | if (count < 0) | |
5132 | count = 0; | |
5133 | /* | |
5134 | * Correct length for chain is "count". | |
5135 | * Find the mbuf with last data, adjust its length, | |
5136 | * and toss data from remaining mbufs on chain. | |
5137 | */ | |
5138 | m = mp; | |
5139 | if (m->m_flags & M_PKTHDR) | |
5140 | m->m_pkthdr.len = count; | |
5141 | for (; m; m = m->m_next) { | |
5142 | if (m->m_len >= count) { | |
5143 | m->m_len = count; | |
5144 | break; | |
5145 | } | |
5146 | count -= m->m_len; | |
5147 | } | |
91447636 | 5148 | while ((m = m->m_next)) |
1c79356b A |
5149 | m->m_len = 0; |
5150 | } | |
5151 | } | |
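
Illustrative use, assuming a hypothetical receive path: positive lengths trim from the head and negative lengths from the tail, so stripping a 14-byte link header and a 4-byte FCS is two calls, with m_pkthdr.len kept in sync by the routine.

    m_adj(m, 14);       /* drop the link-layer header */
    m_adj(m, -4);       /* drop the trailing FCS */
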
5152 | ||
5153 | /* | |
5154 | * Rearrange an mbuf chain so that len bytes are contiguous
5155 | * and in the data area of an mbuf (so that mtod and dtom | |
5156 | * will work for a structure of size len). Returns the resulting | |
5157 | * mbuf chain on success, frees it and returns null on failure. | |
5158 | * If there is room, it will add up to max_protohdr-len extra bytes to the | |
5159 | * contiguous region in an attempt to avoid being called next time. | |
5160 | */ | |
5161 | int MPFail; | |
5162 | ||
5163 | struct mbuf * | |
2d21ac55 | 5164 | m_pullup(struct mbuf *n, int len) |
1c79356b | 5165 | { |
91447636 A |
5166 | struct mbuf *m; |
5167 | int count; | |
1c79356b A |
5168 | int space; |
5169 | ||
5170 | /* | |
5171 | * If first mbuf has no cluster, and has room for len bytes | |
5172 | * without shifting current data, pullup into it, | |
5173 | * otherwise allocate a new mbuf to prepend to the chain. | |
5174 | */ | |
5175 | if ((n->m_flags & M_EXT) == 0 && | |
5176 | n->m_data + len < &n->m_dat[MLEN] && n->m_next) { | |
5177 | if (n->m_len >= len) | |
5178 | return (n); | |
5179 | m = n; | |
5180 | n = n->m_next; | |
5181 | len -= m->m_len; | |
5182 | } else { | |
5183 | if (len > MHLEN) | |
5184 | goto bad; | |
2d21ac55 | 5185 | _MGET(m, M_DONTWAIT, n->m_type); |
1c79356b A |
5186 | if (m == 0) |
5187 | goto bad; | |
5188 | m->m_len = 0; | |
5189 | if (n->m_flags & M_PKTHDR) { | |
5190 | M_COPY_PKTHDR(m, n); | |
5191 | n->m_flags &= ~M_PKTHDR; | |
5192 | } | |
5193 | } | |
5194 | space = &m->m_dat[MLEN] - (m->m_data + m->m_len); | |
5195 | do { | |
2d21ac55 A |
5196 | count = MIN(MIN(MAX(len, max_protohdr), space), n->m_len); |
5197 | bcopy(MTOD(n, caddr_t), MTOD(m, caddr_t) + m->m_len, | |
5198 | (unsigned)count); | |
1c79356b A |
5199 | len -= count; |
5200 | m->m_len += count; | |
5201 | n->m_len -= count; | |
5202 | space -= count; | |
5203 | if (n->m_len) | |
5204 | n->m_data += count; | |
5205 | else | |
5206 | n = m_free(n); | |
5207 | } while (len > 0 && n); | |
5208 | if (len > 0) { | |
5209 | (void) m_free(m); | |
5210 | goto bad; | |
5211 | } | |
5212 | m->m_next = n; | |
5213 | return (m); | |
5214 | bad: | |
5215 | m_freem(n); | |
5216 | MPFail++; | |
5217 | return (0); | |
5218 | } | |
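
The canonical caller-side pattern (a sketch, assuming <netinet/ip.h>): guarantee a fixed-size header is contiguous before casting, remembering that the routine frees the chain when it fails.

    struct ip *ip;

    if (m->m_len < sizeof (struct ip) &&
        (m = m_pullup(m, sizeof (struct ip))) == NULL)
        return;                 /* chain already freed */
    ip = mtod(m, struct ip *);
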
5219 | ||
6d2010ae A |
5220 | /* |
5221 | * Like m_pullup(), except a new mbuf is always allocated, and we allow | |
5222 | * the amount of empty space before the data in the new mbuf to be specified | |
5223 | * (in the event that the caller expects to prepend later). | |
5224 | */ | |
5225 | __private_extern__ int MSFail = 0; | |
5226 | ||
5227 | __private_extern__ struct mbuf * | |
5228 | m_copyup(struct mbuf *n, int len, int dstoff) | |
5229 | { | |
5230 | struct mbuf *m; | |
5231 | int count, space; | |
5232 | ||
5233 | if (len > (MHLEN - dstoff)) | |
5234 | goto bad; | |
5235 | MGET(m, M_DONTWAIT, n->m_type); | |
5236 | if (m == NULL) | |
5237 | goto bad; | |
5238 | m->m_len = 0; | |
5239 | if (n->m_flags & M_PKTHDR) { | |
5240 | m_copy_pkthdr(m, n); | |
5241 | n->m_flags &= ~M_PKTHDR; | |
5242 | } | |
5243 | m->m_data += dstoff; | |
5244 | space = &m->m_dat[MLEN] - (m->m_data + m->m_len); | |
5245 | do { | |
5246 | count = min(min(max(len, max_protohdr), space), n->m_len); | |
5247 | memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), | |
5248 | (unsigned)count); | |
5249 | len -= count; | |
5250 | m->m_len += count; | |
5251 | n->m_len -= count; | |
5252 | space -= count; | |
5253 | if (n->m_len) | |
5254 | n->m_data += count; | |
5255 | else | |
5256 | n = m_free(n); | |
5257 | } while (len > 0 && n); | |
5258 | if (len > 0) { | |
5259 | (void) m_free(m); | |
5260 | goto bad; | |
5261 | } | |
5262 | m->m_next = n; | |
5263 | return (m); | |
5264 | bad: | |
5265 | m_freem(n); | |
5266 | MSFail++; | |
5267 | return (NULL); | |
5268 | } | |
5269 | ||
1c79356b A |
5270 | /* |
5271 | * Partition an mbuf chain in two pieces, returning the tail -- | |
5272 | * all but the first len0 bytes. In case of failure, it returns NULL and | |
5273 | * attempts to restore the chain to its original state. | |
5274 | */ | |
5275 | struct mbuf * | |
2d21ac55 | 5276 | m_split(struct mbuf *m0, int len0, int wait) |
b0d623f7 A |
5277 | { |
5278 | return (m_split0(m0, len0, wait, 1)); | |
5279 | } | |
5280 | ||
5281 | static struct mbuf * | |
5282 | m_split0(struct mbuf *m0, int len0, int wait, int copyhdr) | |
1c79356b | 5283 | { |
91447636 | 5284 | struct mbuf *m, *n; |
1c79356b A |
5285 | unsigned len = len0, remain; |
5286 | ||
5287 | for (m = m0; m && len > m->m_len; m = m->m_next) | |
5288 | len -= m->m_len; | |
2d21ac55 A |
5289 | if (m == NULL) |
5290 | return (NULL); | |
1c79356b | 5291 | remain = m->m_len - len; |
b0d623f7 | 5292 | if (copyhdr && (m0->m_flags & M_PKTHDR)) { |
2d21ac55 A |
5293 | _MGETHDR(n, wait, m0->m_type); |
5294 | if (n == NULL) | |
5295 | return (NULL); | |
1c79356b A |
5296 | n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; |
5297 | n->m_pkthdr.len = m0->m_pkthdr.len - len0; | |
5298 | m0->m_pkthdr.len = len0; | |
5299 | if (m->m_flags & M_EXT) | |
5300 | goto extpacket; | |
5301 | if (remain > MHLEN) { | |
5302 | /* m can't be the lead packet */ | |
5303 | MH_ALIGN(n, 0); | |
5304 | n->m_next = m_split(m, len, wait); | |
2d21ac55 | 5305 | if (n->m_next == NULL) { |
1c79356b | 5306 | (void) m_free(n); |
2d21ac55 | 5307 | return (NULL); |
1c79356b A |
5308 | } else |
5309 | return (n); | |
5310 | } else | |
5311 | MH_ALIGN(n, remain); | |
5312 | } else if (remain == 0) { | |
5313 | n = m->m_next; | |
2d21ac55 | 5314 | m->m_next = NULL; |
1c79356b A |
5315 | return (n); |
5316 | } else { | |
2d21ac55 A |
5317 | _MGET(n, wait, m->m_type); |
5318 | if (n == NULL) | |
5319 | return (NULL); | |
1c79356b A |
5320 | M_ALIGN(n, remain); |
5321 | } | |
5322 | extpacket: | |
5323 | if (m->m_flags & M_EXT) { | |
5324 | n->m_flags |= M_EXT; | |
0b4e3aa0 | 5325 | n->m_ext = m->m_ext; |
2d21ac55 | 5326 | m_incref(m); |
1c79356b A |
5327 | n->m_data = m->m_data + len; |
5328 | } else { | |
2d21ac55 | 5329 | bcopy(MTOD(m, caddr_t) + len, MTOD(n, caddr_t), remain); |
1c79356b A |
5330 | } |
5331 | n->m_len = remain; | |
5332 | m->m_len = len; | |
5333 | n->m_next = m->m_next; | |
2d21ac55 | 5334 | m->m_next = NULL; |
1c79356b A |
5335 | return (n); |
5336 | } | |
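
Illustrative use: cut a packet after 512 bytes; on success the original keeps the first 512 bytes with m_pkthdr.len rewritten above, and the returned chain carries its own packet header for the remainder.

    struct mbuf *tail;

    tail = m_split(pkt, 512, M_DONTWAIT);
    if (tail == NULL)
        return;                 /* pkt restored on failure */
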
2d21ac55 | 5337 | |
1c79356b A |
5338 | /* |
5339 | * Routine to copy from device local memory into mbufs. | |
5340 | */ | |
5341 | struct mbuf * | |
2d21ac55 A |
5342 | m_devget(char *buf, int totlen, int off0, struct ifnet *ifp, |
5343 | void (*copy)(const void *, void *, size_t)) | |
1c79356b | 5344 | { |
91447636 | 5345 | struct mbuf *m; |
2d21ac55 | 5346 | struct mbuf *top = NULL, **mp = &top; | |
91447636 A |
5347 | int off = off0, len; |
5348 | char *cp; | |
1c79356b A |
5349 | char *epkt; |
5350 | ||
5351 | cp = buf; | |
5352 | epkt = cp + totlen; | |
5353 | if (off) { | |
5354 | /* | |
5355 | * If 'off' is non-zero, packet is trailer-encapsulated, | |
5356 | * so we have to skip the type and length fields. | |
5357 | */ | |
2d21ac55 A |
5358 | cp += off + 2 * sizeof (u_int16_t); |
5359 | totlen -= 2 * sizeof (u_int16_t); | |
1c79356b | 5360 | } |
2d21ac55 A |
5361 | _MGETHDR(m, M_DONTWAIT, MT_DATA); |
5362 | if (m == NULL) | |
5363 | return (NULL); | |
1c79356b A |
5364 | m->m_pkthdr.rcvif = ifp; |
5365 | m->m_pkthdr.len = totlen; | |
5366 | m->m_len = MHLEN; | |
5367 | ||
5368 | while (totlen > 0) { | |
2d21ac55 A |
5369 | if (top != NULL) { |
5370 | _MGET(m, M_DONTWAIT, MT_DATA); | |
5371 | if (m == NULL) { | |
1c79356b | 5372 | m_freem(top); |
2d21ac55 | 5373 | return (NULL); |
1c79356b A |
5374 | } |
5375 | m->m_len = MLEN; | |
5376 | } | |
2d21ac55 | 5377 | len = MIN(totlen, epkt - cp); |
1c79356b A |
5378 | if (len >= MINCLSIZE) { |
5379 | MCLGET(m, M_DONTWAIT); | |
2d21ac55 A |
5380 | if (m->m_flags & M_EXT) { |
5381 | m->m_len = len = MIN(len, m_maxsize(MC_CL)); | |
5382 | } else { | |
5383 | /* give up when it's out of cluster mbufs */ | |
5384 | if (top != NULL) | |
5385 | m_freem(top); | |
1c79356b | 5386 | m_freem(m); |
2d21ac55 | 5387 | return (NULL); |
1c79356b A |
5388 | } |
5389 | } else { | |
5390 | /* | |
5391 | * Place initial small packet/header at end of mbuf. | |
5392 | */ | |
5393 | if (len < m->m_len) { | |
2d21ac55 A |
5394 | if (top == NULL && |
5395 | len + max_linkhdr <= m->m_len) | |
1c79356b A |
5396 | m->m_data += max_linkhdr; |
5397 | m->m_len = len; | |
2d21ac55 | 5398 | } else { |
1c79356b | 5399 | len = m->m_len; |
2d21ac55 | 5400 | } |
1c79356b A |
5401 | } |
5402 | if (copy) | |
2d21ac55 | 5403 | copy(cp, MTOD(m, caddr_t), (unsigned)len); |
1c79356b | 5404 | else |
2d21ac55 | 5405 | bcopy(cp, MTOD(m, caddr_t), (unsigned)len); |
1c79356b A |
5406 | cp += len; |
5407 | *mp = m; | |
5408 | mp = &m->m_next; | |
5409 | totlen -= len; | |
5410 | if (cp == epkt) | |
5411 | cp = buf; | |
5412 | } | |
5413 | return (top); | |
5414 | } | |
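
Illustrative use, assuming a hypothetical driver (frame_base, frame_len and ifp are the device buffer, its length and the receiving interface): a NULL copy routine selects the plain bcopy() path above.

    struct mbuf *m;

    m = m_devget(frame_base, frame_len, 0, ifp, NULL);
    if (m == NULL)
        return;                 /* out of mbufs or clusters */
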
5415 | ||
6d2010ae A |
5416 | #ifndef MBUF_GROWTH_NORMAL_THRESH |
5417 | #define MBUF_GROWTH_NORMAL_THRESH 25 | |
5418 | #endif | |
b0d623f7 | 5419 | |
1c79356b | 5420 | /* |
2d21ac55 | 5421 | * Cluster freelist allocation check. |
1c79356b A |
5422 | */ |
5423 | static int | |
91447636 | 5424 | m_howmany(int num, size_t bufsize) |
1c79356b | 5425 | { |
2d21ac55 | 5426 | int i = 0, j = 0; |
6d2010ae A |
5427 | u_int32_t m_mbclusters, m_clusters, m_bigclusters, m_16kclusters; |
5428 | u_int32_t m_mbfree, m_clfree, m_bigclfree, m_16kclfree; | |
5429 | u_int32_t sumclusters, freeclusters; | |
5430 | u_int32_t percent_pool, percent_kmem; | |
5431 | u_int32_t mb_growth, mb_growth_thresh; | |
5432 | ||
5433 | VERIFY(bufsize == m_maxsize(MC_BIGCL) || | |
5434 | bufsize == m_maxsize(MC_16KCL)); | |
2d21ac55 A |
5435 | |
5436 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
5437 | ||
6d2010ae A |
5438 | /* Numbers in 2K cluster units */ |
5439 | m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT; | |
2d21ac55 | 5440 | m_clusters = m_total(MC_CL); |
6d2010ae | 5441 | m_bigclusters = m_total(MC_BIGCL) << NCLPBGSHIFT; |
2d21ac55 | 5442 | m_16kclusters = m_total(MC_16KCL); |
6d2010ae A |
5443 | sumclusters = m_mbclusters + m_clusters + m_bigclusters; |
5444 | ||
5445 | m_mbfree = m_infree(MC_MBUF) >> NMBPCLSHIFT; | |
2d21ac55 | 5446 | m_clfree = m_infree(MC_CL); |
6d2010ae | 5447 | m_bigclfree = m_infree(MC_BIGCL) << NCLPBGSHIFT; |
2d21ac55 | 5448 | m_16kclfree = m_infree(MC_16KCL); |
6d2010ae | 5449 | freeclusters = m_mbfree + m_clfree + m_bigclfree; |
2d21ac55 | 5450 | |
91447636 | 5451 | /* Bail if we've maxed out the mbuf memory map */ |
6d2010ae | 5452 | if ((bufsize == m_maxsize(MC_BIGCL) && sumclusters >= nclusters) || |
2d21ac55 | 5453 | (njcl > 0 && bufsize == m_maxsize(MC_16KCL) && |
6d2010ae | 5454 | (m_16kclusters << NCLPJCLSHIFT) >= njcl)) { |
2d21ac55 A |
5455 | return (0); |
5456 | } | |
5457 | ||
6d2010ae | 5458 | if (bufsize == m_maxsize(MC_BIGCL)) { |
2d21ac55 | 5459 | /* Under minimum */ |
6d2010ae A |
5460 | if (m_bigclusters < m_minlimit(MC_BIGCL)) |
5461 | return (m_minlimit(MC_BIGCL) - m_bigclusters); | |
5462 | ||
5463 | percent_pool = | |
5464 | ((sumclusters - freeclusters) * 100) / sumclusters; | |
5465 | percent_kmem = (sumclusters * 100) / nclusters; | |
5466 | ||
5467 | /* | |
5468 | * If a light/normal user, grow conservatively (75%) | |
5469 | * If a heavy user, grow aggressively (50%) | |
5470 | */ | |
5471 | if (percent_kmem < MBUF_GROWTH_NORMAL_THRESH) | |
5472 | mb_growth = MB_GROWTH_NORMAL; | |
5473 | else | |
5474 | mb_growth = MB_GROWTH_AGGRESSIVE; | |
5475 | ||
5476 | if (percent_kmem < 5) { | |
5477 | /* For initial allocations */ | |
5478 | i = num; | |
5479 | } else { | |
5480 | /* Return if >= MBIGCL_LOWAT clusters available */ | |
5481 | if (m_infree(MC_BIGCL) >= MBIGCL_LOWAT && | |
5482 | m_total(MC_BIGCL) >= | |
5483 | MBIGCL_LOWAT + m_minlimit(MC_BIGCL)) | |
2d21ac55 | 5484 | return (0); |
6d2010ae A |
5485 | |
5486 | /* Ensure at least num clusters are accessible */ | |
5487 | if (num >= m_infree(MC_BIGCL)) | |
5488 | i = num - m_infree(MC_BIGCL); | |
5489 | if (num > m_total(MC_BIGCL) - m_minlimit(MC_BIGCL)) | |
5490 | j = num - (m_total(MC_BIGCL) - | |
5491 | m_minlimit(MC_BIGCL)); | |
5492 | ||
2d21ac55 | 5493 | i = MAX(i, j); |
6d2010ae A |
5494 | |
5495 | /* | |
5496 | * Grow pool if percent_pool > 75 (normal growth) | |
5497 | * or percent_pool > 50 (aggressive growth). | |
5498 | */ | |
5499 | mb_growth_thresh = 100 - (100 / (1 << mb_growth)); | |
5500 | if (percent_pool > mb_growth_thresh) | |
5501 | j = ((sumclusters + num) >> mb_growth) - | |
5502 | freeclusters; | |
2d21ac55 | 5503 | i = MAX(i, j); |
2d21ac55 | 5504 | } |
6d2010ae A |
5505 | |
5506 | /* Check to ensure we didn't go over limits */ | |
5507 | if (i + m_bigclusters >= m_maxlimit(MC_BIGCL)) | |
5508 | i = m_maxlimit(MC_BIGCL) - m_bigclusters; | |
5509 | if ((i << 1) + sumclusters >= nclusters) | |
5510 | i = (nclusters - sumclusters) >> 1; | |
2d21ac55 | 5511 | VERIFY((m_total(MC_BIGCL) + i) <= m_maxlimit(MC_BIGCL)); |
6d2010ae A |
5512 | VERIFY(sumclusters + (i << 1) <= nclusters); |
5513 | ||
5514 | } else { /* 16K CL */ | |
2d21ac55 | 5515 | VERIFY(njcl > 0); |
6d2010ae A |
5516 | /* Ensure at least num clusters are available */ |
5517 | if (num >= m_16kclfree) | |
5518 | i = num - m_16kclfree; | |
5519 | ||
5520 | /* Always grow 16KCL pool aggressively */ | |
5521 | if (((m_16kclusters + num) >> 1) > m_16kclfree) | |
5522 | j = ((m_16kclusters + num) >> 1) - m_16kclfree; | |
5523 | i = MAX(i, j); | |
5524 | ||
5525 | /* Check to ensure we don't go over limit */ | |
5526 | if (i + m_16kclusters >= m_maxlimit(MC_16KCL)) | |
5527 | i = m_maxlimit(MC_16KCL) - m_16kclusters; | |
2d21ac55 | 5528 | VERIFY((m_total(MC_16KCL) + i) <= m_maxlimit(MC_16KCL)); |
91447636 | 5529 | } |
2d21ac55 | 5530 | return (i); |
1c79356b | 5531 | } |
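
To make the growth-threshold arithmetic concrete (a worked example; the values 2 and 1 for MB_GROWTH_NORMAL and MB_GROWTH_AGGRESSIVE are inferred from the 75%/50% comment above, not shown in this section):

    /* mb_growth_thresh = 100 - (100 / (1 << mb_growth)) */
    /* normal (mb_growth == 2):     100 - 100/4 = 75, grow past 75% pool use */
    /* aggressive (mb_growth == 1): 100 - 100/2 = 50, grow past 50% pool use */
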
b0d623f7 A |
5532 | /* |
5533 | * Return the number of bytes in the mbuf chain, m. | |
6d2010ae A |
5534 | */ |
5535 | unsigned int | |
b0d623f7 A |
5536 | m_length(struct mbuf *m) |
5537 | { | |
5538 | struct mbuf *m0; | |
5539 | unsigned int pktlen; | |
5540 | ||
5541 | if (m->m_flags & M_PKTHDR) | |
5542 | return (m->m_pkthdr.len); | |
5543 | ||
5544 | pktlen = 0; | |
5545 | for (m0 = m; m0 != NULL; m0 = m0->m_next) | |
5546 | pktlen += m0->m_len; | |
5547 | return (pktlen); | |
5548 | } | |
5549 | ||
1c79356b A |
5550 | /* |
5551 | * Copy data from a buffer back into the indicated mbuf chain, | |
5552 | * starting "off" bytes from the beginning, extending the mbuf | |
5553 | * chain if necessary. | |
5554 | */ | |
5555 | void | |
b0d623f7 | 5556 | m_copyback(struct mbuf *m0, int off, int len, const void *cp) |
1c79356b | 5557 | { |
b0d623f7 A |
5558 | #if DEBUG |
5559 | struct mbuf *origm = m0; | |
5560 | int error; | |
5561 | #endif /* DEBUG */ | |
1c79356b | 5562 | |
2d21ac55 | 5563 | if (m0 == NULL) |
1c79356b | 5564 | return; |
b0d623f7 A |
5565 | |
5566 | #if DEBUG | |
5567 | error = | |
5568 | #endif /* DEBUG */ | |
5569 | m_copyback0(&m0, off, len, cp, | |
5570 | M_COPYBACK0_COPYBACK | M_COPYBACK0_EXTEND, M_DONTWAIT); | |
5571 | ||
5572 | #if DEBUG | |
5573 | if (error != 0 || (m0 != NULL && origm != m0)) | |
5574 | panic("m_copyback"); | |
5575 | #endif /* DEBUG */ | |
5576 | } | |
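
Illustrative use (csum_off and the value are illustrative): overwrite bytes at an offset, letting the M_COPYBACK0_EXTEND flag grow the chain when the range runs past the current end.

    uint16_t csum = 0xffff;

    m_copyback(m, csum_off, sizeof (csum), &csum);
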
5577 | ||
5578 | struct mbuf * | |
5579 | m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how) | |
5580 | { | |
5581 | int error; | |
5582 | ||
5583 | /* don't support chain expansion */ | |
5584 | VERIFY(off + len <= m_length(m0)); | |
5585 | ||
5586 | error = m_copyback0(&m0, off, len, cp, | |
5587 | M_COPYBACK0_COPYBACK | M_COPYBACK0_COW, how); | |
5588 | if (error) { | |
5589 | /* | |
5590 | * no way to recover from partial success. | |
5591 | * just free the chain. | |
5592 | */ | |
5593 | m_freem(m0); | |
5594 | return (NULL); | |
5595 | } | |
5596 | return (m0); | |
5597 | } | |
5598 | ||
5599 | /* | |
5600 | * m_makewritable: ensure the specified range is writable.
5601 | */ | |
5602 | int | |
5603 | m_makewritable(struct mbuf **mp, int off, int len, int how) | |
5604 | { | |
5605 | int error; | |
5606 | #if DEBUG | |
5607 | struct mbuf *n; | |
5608 | int origlen, reslen; | |
5609 | ||
5610 | origlen = m_length(*mp); | |
5611 | #endif /* DEBUG */ | |
5612 | ||
5613 | #if 0 /* M_COPYALL is large enough */ | |
5614 | if (len == M_COPYALL) | |
5615 | len = m_length(*mp) - off; /* XXX */ | |
5616 | #endif | |
5617 | ||
5618 | error = m_copyback0(mp, off, len, NULL, | |
5619 | M_COPYBACK0_PRESERVE | M_COPYBACK0_COW, how); | |
5620 | ||
5621 | #if DEBUG | |
5622 | reslen = 0; | |
5623 | for (n = *mp; n; n = n->m_next) | |
5624 | reslen += n->m_len; | |
5625 | if (origlen != reslen) | |
5626 | panic("m_makewritable: length changed"); | |
5627 | if (((*mp)->m_flags & M_PKTHDR) && reslen != (*mp)->m_pkthdr.len) | |
5628 | panic("m_makewritable: inconsist"); | |
5629 | #endif /* DEBUG */ | |
5630 | ||
5631 | return (error); | |
5632 | } | |
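
Illustrative use, assuming a hypothetical in-place rewrite (e.g. address translation at ip_off): force the target range into writable storage first; read-only cluster references are copied out by the M_COPYBACK0_COW machinery in m_copyback0() below.

    if (m_makewritable(&m, ip_off, 4, M_DONTWAIT) != 0)
        return;                 /* ENOBUFS: leave the packet unmodified */
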
5633 | ||
5634 | static int | |
5635 | m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags, | |
5636 | int how) | |
5637 | { | |
5638 | int mlen; | |
5639 | struct mbuf *m, *n; | |
5640 | struct mbuf **mp; | |
5641 | int totlen = 0; | |
5642 | const char *cp = vp; | |
5643 | ||
5644 | VERIFY(mp0 != NULL); | |
5645 | VERIFY(*mp0 != NULL); | |
5646 | VERIFY((flags & M_COPYBACK0_PRESERVE) == 0 || cp == NULL); | |
5647 | VERIFY((flags & M_COPYBACK0_COPYBACK) == 0 || cp != NULL); | |
5648 | ||
5649 | /* | |
5650 | * we don't bother to update "totlen" in the case of M_COPYBACK0_COW, | |
5651 | * assuming that M_COPYBACK0_EXTEND and M_COPYBACK0_COW are exclusive. | |
5652 | */ | |
5653 | ||
5654 | VERIFY((~flags & (M_COPYBACK0_EXTEND|M_COPYBACK0_COW)) != 0); | |
5655 | ||
5656 | mp = mp0; | |
5657 | m = *mp; | |
1c79356b A |
5658 | while (off > (mlen = m->m_len)) { |
5659 | off -= mlen; | |
5660 | totlen += mlen; | |
2d21ac55 | 5661 | if (m->m_next == NULL) { |
b0d623f7 A |
5662 | int tspace; |
5663 | extend: | |
5664 | if (!(flags & M_COPYBACK0_EXTEND)) | |
1c79356b | 5665 | goto out; |
b0d623f7 A |
5666 | |
5667 | /* | |
5668 | * try to make some space at the end of "m". | |
5669 | */ | |
5670 | ||
5671 | mlen = m->m_len; | |
5672 | if (off + len >= MINCLSIZE && | |
5673 | !(m->m_flags & M_EXT) && m->m_len == 0) { | |
5674 | MCLGET(m, how); | |
5675 | } | |
5676 | tspace = M_TRAILINGSPACE(m); | |
5677 | if (tspace > 0) { | |
5678 | tspace = MIN(tspace, off + len); | |
5679 | VERIFY(tspace > 0); | |
5680 | bzero(mtod(m, char *) + m->m_len, | |
5681 | MIN(off, tspace)); | |
5682 | m->m_len += tspace; | |
5683 | off += mlen; | |
5684 | totlen -= mlen; | |
5685 | continue; | |
5686 | } | |
5687 | ||
5688 | /* | |
5689 | * need to allocate an mbuf. | |
5690 | */ | |
5691 | ||
5692 | if (off + len >= MINCLSIZE) { | |
5693 | n = m_getcl(how, m->m_type, 0); | |
5694 | } else { | |
5695 | n = _M_GET(how, m->m_type); | |
5696 | } | |
5697 | if (n == NULL) { | |
5698 | goto out; | |
5699 | } | |
5700 | n->m_len = 0; | |
5701 | n->m_len = MIN(M_TRAILINGSPACE(n), off + len); | |
5702 | bzero(mtod(n, char *), MIN(n->m_len, off)); | |
1c79356b A |
5703 | m->m_next = n; |
5704 | } | |
b0d623f7 | 5705 | mp = &m->m_next; |
1c79356b A |
5706 | m = m->m_next; |
5707 | } | |
5708 | while (len > 0) { | |
b0d623f7 A |
5709 | mlen = m->m_len - off; |
5710 | if (mlen != 0 && m_mclhasreference(m)) { | |
5711 | char *datap; | |
5712 | int eatlen; | |
5713 | ||
5714 | /* | |
5715 | * this mbuf is read-only. | |
5716 | * allocate a new writable mbuf and try again. | |
5717 | */ | |
5718 | ||
39236c6e | 5719 | #if DIAGNOSTIC |
b0d623f7 A |
5720 | if (!(flags & M_COPYBACK0_COW)) |
5721 | panic("m_copyback0: read-only"); | |
39236c6e | 5722 | #endif /* DIAGNOSTIC */ |
b0d623f7 A |
5723 | |
5724 | /* | |
5725 | * if we're going to write into the middle of | |
5726 | * a mbuf, split it first. | |
5727 | */ | |
5728 | if (off > 0 && len < mlen) { | |
5729 | n = m_split0(m, off, how, 0); | |
5730 | if (n == NULL) | |
5731 | goto enobufs; | |
5732 | m->m_next = n; | |
5733 | mp = &m->m_next; | |
5734 | m = n; | |
5735 | off = 0; | |
5736 | continue; | |
5737 | } | |
5738 | ||
5739 | /* | |
5740 | * XXX TODO coalesce into the trailingspace of | |
5741 | * the previous mbuf when possible. | |
5742 | */ | |
5743 | ||
5744 | /* | |
5745 | * allocate a new mbuf. copy packet header if needed. | |
5746 | */ | |
5747 | n = _M_GET(how, m->m_type); | |
5748 | if (n == NULL) | |
5749 | goto enobufs; | |
5750 | if (off == 0 && (m->m_flags & M_PKTHDR)) { | |
5751 | M_COPY_PKTHDR(n, m); | |
5752 | n->m_len = MHLEN; | |
5753 | } else { | |
5754 | if (len >= MINCLSIZE) | |
5755 | MCLGET(n, M_DONTWAIT); | |
5756 | n->m_len = | |
5757 | (n->m_flags & M_EXT) ? MCLBYTES : MLEN; | |
5758 | } | |
5759 | if (n->m_len > len) | |
5760 | n->m_len = len; | |
5761 | ||
5762 | /* | |
5763 | * free the region which has been overwritten. | |
5764 | * copying data from old mbufs if requested. | |
5765 | */ | |
5766 | if (flags & M_COPYBACK0_PRESERVE) | |
5767 | datap = mtod(n, char *); | |
5768 | else | |
5769 | datap = NULL; | |
5770 | eatlen = n->m_len; | |
5771 | VERIFY(off == 0 || eatlen >= mlen); | |
5772 | if (off > 0) { | |
5773 | VERIFY(len >= mlen); | |
5774 | m->m_len = off; | |
5775 | m->m_next = n; | |
5776 | if (datap) { | |
5777 | m_copydata(m, off, mlen, datap); | |
5778 | datap += mlen; | |
5779 | } | |
5780 | eatlen -= mlen; | |
5781 | mp = &m->m_next; | |
5782 | m = m->m_next; | |
5783 | } | |
5784 | while (m != NULL && m_mclhasreference(m) && | |
5785 | n->m_type == m->m_type && eatlen > 0) { | |
5786 | mlen = MIN(eatlen, m->m_len); | |
5787 | if (datap) { | |
5788 | m_copydata(m, 0, mlen, datap); | |
5789 | datap += mlen; | |
5790 | } | |
5791 | m->m_data += mlen; | |
5792 | m->m_len -= mlen; | |
5793 | eatlen -= mlen; | |
5794 | if (m->m_len == 0) | |
5795 | *mp = m = m_free(m); | |
5796 | } | |
5797 | if (eatlen > 0) | |
5798 | n->m_len -= eatlen; | |
5799 | n->m_next = m; | |
5800 | *mp = m = n; | |
5801 | continue; | |
5802 | } | |
5803 | mlen = MIN(mlen, len); | |
5804 | if (flags & M_COPYBACK0_COPYBACK) { | |
5805 | bcopy(cp, mtod(m, caddr_t) + off, (unsigned)mlen); | |
5806 | cp += mlen; | |
5807 | } | |
1c79356b A |
5808 | len -= mlen; |
5809 | mlen += off; | |
5810 | off = 0; | |
5811 | totlen += mlen; | |
5812 | if (len == 0) | |
5813 | break; | |
2d21ac55 | 5814 | if (m->m_next == NULL) { |
b0d623f7 | 5815 | goto extend; |
1c79356b | 5816 | } |
b0d623f7 | 5817 | mp = &m->m_next; |
1c79356b A |
5818 | m = m->m_next; |
5819 | } | |
2d21ac55 | 5820 | out: |
b0d623f7 A |
5821 | if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) { |
5822 | VERIFY(flags & M_COPYBACK0_EXTEND); | |
1c79356b | 5823 | m->m_pkthdr.len = totlen; |
b0d623f7 A |
5824 | } |
5825 | ||
5826 | return (0); | |
5827 | ||
5828 | enobufs: | |
5829 | return (ENOBUFS); | |
1c79356b A |
5830 | } |
5831 | ||
39236c6e | 5832 | uint64_t |
2d21ac55 A |
5833 | mcl_to_paddr(char *addr) |
5834 | { | |
b0d623f7 | 5835 | vm_offset_t base_phys; |
1c79356b | 5836 | |
2d21ac55 | 5837 | if (!MBUF_IN_MAP(addr)) |
39236c6e A |
5838 | return (0); |
5839 | base_phys = mcl_paddr[atop_64(addr - (char *)mbutl)]; | |
1c79356b A |
5840 | |
5841 | if (base_phys == 0) | |
39236c6e A |
5842 | return (0); |
5843 | return ((uint64_t)(ptoa_64(base_phys) | ((uint64_t)addr & PAGE_MASK))); | |
1c79356b A |
5844 | } |
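
Illustrative use, assuming a hypothetical DMA setup path: translate the current data pointer to a physical address for a descriptor, treating 0 as "not in the mbuf map".

    uint64_t pa;

    pa = mcl_to_paddr(mtod(m, char *));
    if (pa == 0)
        return;                 /* not mapped; use a bounce copy instead */
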
5845 | ||
5846 | /* | |
5847 | * Dup the mbuf chain passed in. The whole thing. No cute additional cruft. | |
5848 | * And really copy the thing. That way, we don't "precompute" checksums | |
2d21ac55 A |
5849 | * for unsuspecting consumers. Assumption: m->m_nextpkt == 0. Trick: for |
5850 | * small packets, don't dup into a cluster. That way received packets | |
5851 | * don't take up too much room in the sockbuf (cf. sbspace()). | |
1c79356b A |
5852 | */ |
5853 | int MDFail; | |
5854 | ||
5855 | struct mbuf * | |
91447636 | 5856 | m_dup(struct mbuf *m, int how) |
2d21ac55 | 5857 | { |
91447636 | 5858 | struct mbuf *n, **np; |
1c79356b A |
5859 | struct mbuf *top; |
5860 | int copyhdr = 0; | |
5861 | ||
5862 | np = &top; | |
2d21ac55 | 5863 | top = NULL; |
1c79356b A |
5864 | if (m->m_flags & M_PKTHDR) |
5865 | copyhdr = 1; | |
5866 | ||
5867 | /* | |
5868 | * Quick check: if we have one mbuf and its data fits in an | |
5869 | * mbuf with packet header, just copy and go. | |
5870 | */ | |
2d21ac55 A |
5871 | if (m->m_next == NULL) { |
5872 | /* Then just move the data into an mbuf and be done... */ | |
5873 | if (copyhdr) { | |
5874 | if (m->m_pkthdr.len <= MHLEN && m->m_len <= MHLEN) { | |
5875 | if ((n = _M_GETHDR(how, m->m_type)) == NULL) | |
5876 | return (NULL); | |
1c79356b | 5877 | n->m_len = m->m_len; |
3a60a9f5 A |
5878 | m_dup_pkthdr(n, m, how); |
5879 | bcopy(m->m_data, n->m_data, m->m_len); | |
2d21ac55 | 5880 | return (n); |
1c79356b | 5881 | } |
2d21ac55 A |
5882 | } else if (m->m_len <= MLEN) { |
5883 | if ((n = _M_GET(how, m->m_type)) == NULL) | |
5884 | return (NULL); | |
1c79356b A |
5885 | bcopy(m->m_data, n->m_data, m->m_len); |
5886 | n->m_len = m->m_len; | |
2d21ac55 | 5887 | return (n); |
1c79356b A |
5888 | } |
5889 | } | |
2d21ac55 | 5890 | while (m != NULL) { |
1c79356b | 5891 | #if BLUE_DEBUG |
39037602 | 5892 | printf("<%p: %x, %x, %p\n", m, m->m_flags, m->m_len, | |
2d21ac55 | 5893 | m->m_data); |
1c79356b A |
5894 | #endif |
5895 | if (copyhdr) | |
2d21ac55 | 5896 | n = _M_GETHDR(how, m->m_type); |
1c79356b | 5897 | else |
2d21ac55 A |
5898 | n = _M_GET(how, m->m_type); |
5899 | if (n == NULL) | |
1c79356b | 5900 | goto nospace; |
2d21ac55 A |
5901 | if (m->m_flags & M_EXT) { |
5902 | if (m->m_len <= m_maxsize(MC_CL)) | |
5903 | MCLGET(n, how); | |
5904 | else if (m->m_len <= m_maxsize(MC_BIGCL)) | |
5905 | n = m_mbigget(n, how); | |
5906 | else if (m->m_len <= m_maxsize(MC_16KCL) && njcl > 0) | |
5907 | n = m_m16kget(n, how); | |
5908 | if (!(n->m_flags & M_EXT)) { | |
5909 | (void) m_free(n); | |
1c79356b | 5910 | goto nospace; |
2d21ac55 | 5911 | } |
1c79356b A |
5912 | } |
5913 | *np = n; | |
2d21ac55 A |
5914 | if (copyhdr) { |
5915 | /* Don't use M_COPY_PKTHDR: preserve m_data */ | |
3a60a9f5 | 5916 | m_dup_pkthdr(n, m, how); |
1c79356b | 5917 | copyhdr = 0; |
2d21ac55 | 5918 | if (!(n->m_flags & M_EXT)) |
1c79356b A |
5919 | n->m_data = n->m_pktdat; |
5920 | } | |
5921 | n->m_len = m->m_len; | |
5922 | /* | |
5923 | * Get the dup on the same boundary as the original. | |
5924 | * Assume that the two mbufs have the same offset to the data area | |
2d21ac55 | 5925 | * (up to word boundaries). | |
1c79356b | 5926 | */ |
2d21ac55 | 5927 | bcopy(MTOD(m, caddr_t), MTOD(n, caddr_t), (unsigned)n->m_len); |
1c79356b A |
5928 | m = m->m_next; |
5929 | np = &n->m_next; | |
5930 | #if BLUE_DEBUG | |
39037602 | 5931 | printf(">%p: %x, %x, %p\n", n, n->m_flags, n->m_len, | |
2d21ac55 | 5932 | n->m_data); |
1c79356b A |
5933 | #endif |
5934 | } | |
5935 | ||
2d21ac55 | 5936 | if (top == NULL) |
1c79356b A |
5937 | MDFail++; |
5938 | return (top); | |
2d21ac55 A |
5939 | |
5940 | nospace: | |
1c79356b A |
5941 | m_freem(top); |
5942 | MDFail++; | |
2d21ac55 | 5943 | return (NULL); |
1c79356b A |
5944 | } |
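/*
 * Usage note (sketch, not part of the original source): because m_dup()
 * copies the payload bytes rather than sharing cluster storage by
 * reference (as m_copym() may), the duplicate can be modified freely:
 *
 *	struct mbuf *copy = m_dup(m, M_DONTWAIT);
 *	if (copy != NULL)
 *		... scribble on copy without disturbing m ...
 */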
5945 | ||
2d21ac55 A |
5946 | #define MBUF_MULTIPAGES(m) \ |
5947 | (((m)->m_flags & M_EXT) && \ | |
3e170ce0 A |
5948 | ((IS_P2ALIGNED((m)->m_data, PAGE_SIZE) \ |
5949 | && (m)->m_len > PAGE_SIZE) || \ | |
5950 | (!IS_P2ALIGNED((m)->m_data, PAGE_SIZE) && \ | |
5951 | P2ROUNDUP((m)->m_data, PAGE_SIZE) < ((uintptr_t)(m)->m_data + (m)->m_len)))) | |
2d21ac55 A |
5952 | |
5953 | static struct mbuf * | |
5954 | m_expand(struct mbuf *m, struct mbuf **last) | |
9bccf70c | 5955 | { |
2d21ac55 A |
5956 | struct mbuf *top = NULL; |
5957 | struct mbuf **nm = &top; | |
5958 | uintptr_t data0, data; | |
5959 | unsigned int len0, len; | |
5960 | ||
5961 | VERIFY(MBUF_MULTIPAGES(m)); | |
5962 | VERIFY(m->m_next == NULL); | |
5963 | data0 = (uintptr_t)m->m_data; | |
5964 | len0 = m->m_len; | |
5965 | *last = top; | |
5966 | ||
5967 | for (;;) { | |
5968 | struct mbuf *n; | |
5969 | ||
5970 | data = data0; | |
3e170ce0 A |
5971 | if (IS_P2ALIGNED(data, PAGE_SIZE) && len0 > PAGE_SIZE) |
5972 | len = PAGE_SIZE; | |
5973 | else if (!IS_P2ALIGNED(data, PAGE_SIZE) && | |
5974 | P2ROUNDUP(data, PAGE_SIZE) < (data + len0)) | |
5975 | len = P2ROUNDUP(data, PAGE_SIZE) - data; | |
2d21ac55 A |
5976 | else |
5977 | len = len0; | |
5978 | ||
5979 | VERIFY(len > 0); | |
5980 | VERIFY(m->m_flags & M_EXT); | |
5981 | m->m_data = (void *)data; | |
5982 | m->m_len = len; | |
5983 | ||
5984 | *nm = *last = m; | |
5985 | nm = &m->m_next; | |
5986 | m->m_next = NULL; | |
5987 | ||
5988 | data0 += len; | |
5989 | len0 -= len; | |
5990 | if (len0 == 0) | |
5991 | break; | |
5992 | ||
5993 | n = _M_RETRY(M_DONTWAIT, MT_DATA); | |
5994 | if (n == NULL) { | |
5995 | m_freem(top); | |
5996 | top = *last = NULL; | |
5997 | break; | |
5998 | } | |
5999 | ||
6000 | n->m_ext = m->m_ext; | |
6001 | m_incref(m); | |
6002 | n->m_flags |= M_EXT; | |
6003 | m = n; | |
6004 | } | |
6005 | return (top); | |
9bccf70c A |
6006 | } |
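/*
 * Illustrative sketch (not part of the original source): the split
 * length m_expand() chooses per iteration -- a whole page when the
 * data pointer is page aligned and more than a page remains, otherwise
 * just the bytes up to the next page boundary:
 */
#if 0	/* example only */
static unsigned int
ex_split_len(uintptr_t data, unsigned int len)
{
	if (IS_P2ALIGNED(data, PAGE_SIZE) && len > PAGE_SIZE)
		return (PAGE_SIZE);
	if (!IS_P2ALIGNED(data, PAGE_SIZE) &&
	    P2ROUNDUP(data, PAGE_SIZE) < (data + len))
		return (P2ROUNDUP(data, PAGE_SIZE) - data);
	return (len);		/* remainder fits in the current page */
}
#endif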
6007 | ||
2d21ac55 A |
6008 | struct mbuf * |
6009 | m_normalize(struct mbuf *m) | |
9bccf70c | 6010 | { |
2d21ac55 A |
6011 | struct mbuf *top = NULL; |
6012 | struct mbuf **nm = &top; | |
6013 | boolean_t expanded = FALSE; | |
6014 | ||
6015 | while (m != NULL) { | |
6016 | struct mbuf *n; | |
6017 | ||
6018 | n = m->m_next; | |
6019 | m->m_next = NULL; | |
6020 | ||
6021 | /* Does the data cross one or more page boundaries? */ | |
6022 | if (MBUF_MULTIPAGES(m)) { | |
6023 | struct mbuf *last; | |
6024 | if ((m = m_expand(m, &last)) == NULL) { | |
6025 | m_freem(n); | |
6026 | m_freem(top); | |
6027 | top = NULL; | |
6028 | break; | |
6029 | } | |
6030 | *nm = m; | |
6031 | nm = &last->m_next; | |
6032 | expanded = TRUE; | |
6033 | } else { | |
6034 | *nm = m; | |
6035 | nm = &m->m_next; | |
6036 | } | |
6037 | m = n; | |
6038 | } | |
6039 | if (expanded) | |
6040 | atomic_add_32(&mb_normalized, 1); | |
6041 | return (top); | |
9bccf70c A |
6042 | } |
6043 | ||
6d2010ae A |
6044 | /* |
6045 | * Append the specified data to the indicated mbuf chain, | |
6046 | * Extend the mbuf chain if the new data does not fit in | |
6047 | * existing space. | |
6048 | * | |
6049 | * Return 1 if able to complete the job; otherwise 0. | |
6050 | */ | |
6051 | int | |
6052 | m_append(struct mbuf *m0, int len, caddr_t cp) | |
6053 | { | |
6054 | struct mbuf *m, *n; | |
6055 | int remainder, space; | |
6056 | ||
6057 | for (m = m0; m->m_next != NULL; m = m->m_next) | |
6058 | ; | |
6059 | remainder = len; | |
6060 | space = M_TRAILINGSPACE(m); | |
6061 | if (space > 0) { | |
6062 | /* | |
6063 | * Copy into available space. | |
6064 | */ | |
6065 | if (space > remainder) | |
6066 | space = remainder; | |
6067 | bcopy(cp, mtod(m, caddr_t) + m->m_len, space); | |
6068 | m->m_len += space; | |
39037602 A |
6069 | cp += space; |
6070 | remainder -= space; | |
6d2010ae A |
6071 | } |
6072 | while (remainder > 0) { | |
6073 | /* | |
6074 | * Allocate a new mbuf; could check space | |
6075 | * and allocate a cluster instead. | |
6076 | */ | |
6077 | n = m_get(M_WAITOK, m->m_type); | |
6078 | if (n == NULL) | |
6079 | break; | |
6080 | n->m_len = min(MLEN, remainder); | |
6081 | bcopy(cp, mtod(n, caddr_t), n->m_len); | |
6082 | cp += n->m_len; | |
6083 | remainder -= n->m_len; | |
6084 | m->m_next = n; | |
6085 | m = n; | |
6086 | } | |
6087 | if (m0->m_flags & M_PKTHDR) | |
6088 | m0->m_pkthdr.len += len - remainder; | |
6089 | return (remainder == 0); | |
6090 | } | |
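/*
 * Usage note (sketch, not part of the original source): m_append()
 * first fills the trailing space of the last mbuf, then allocates
 * plain mbufs for whatever is left.  `data' below is a hypothetical
 * caller buffer:
 *
 *	if (m_append(m0, datalen, (caddr_t)data) == 0)
 *		... allocation failed; the chain holds a partial copy
 *		    and m_pkthdr.len reflects only the appended part ...
 */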
6091 | ||
6092 | struct mbuf * | |
6093 | m_last(struct mbuf *m) | |
6094 | { | |
6095 | while (m->m_next != NULL) | |
6096 | m = m->m_next; | |
6097 | return (m); | |
6098 | } | |
6099 | ||
316670eb A |
6100 | unsigned int |
6101 | m_fixhdr(struct mbuf *m0) | |
6102 | { | |
6103 | u_int len; | |
6104 | ||
39236c6e A |
6105 | VERIFY(m0->m_flags & M_PKTHDR); |
6106 | ||
316670eb A |
6107 | len = m_length2(m0, NULL); |
6108 | m0->m_pkthdr.len = len; | |
6109 | return (len); | |
6110 | } | |
6111 | ||
6112 | unsigned int | |
6113 | m_length2(struct mbuf *m0, struct mbuf **last) | |
6114 | { | |
6115 | struct mbuf *m; | |
6116 | u_int len; | |
6117 | ||
6118 | len = 0; | |
6119 | for (m = m0; m != NULL; m = m->m_next) { | |
6120 | len += m->m_len; | |
6121 | if (m->m_next == NULL) | |
6122 | break; | |
6123 | } | |
6124 | if (last != NULL) | |
6125 | *last = m; | |
6126 | return (len); | |
6127 | } | |
6128 | ||
6129 | /* | |
6130 | * Defragment a mbuf chain, returning the shortest possible chain of mbufs | |
6131 | * and clusters. If allocation fails and this cannot be completed, NULL will | |
6132 | * be returned, but the passed in chain will be unchanged. Upon success, | |
6133 | * the original chain will be freed, and the new chain will be returned. | |
6134 | * | |
6135 | * If a non-packet-header mbuf is passed in, the original mbuf chain | |
6136 | * will be returned unharmed. | |
6137 | * | |
6138 | * If an offset is specified, the first mbuf in the chain will have a leading | |
6139 | * space of the amount stated by the "off" parameter. | |
6140 | * | |
6141 | * This routine requires that the m_pkthdr.pkt_hdr field of the original | |
6142 | * mbuf chain is cleared by the caller. | |
6143 | */ | |
6144 | struct mbuf * | |
6145 | m_defrag_offset(struct mbuf *m0, u_int32_t off, int how) | |
6146 | { | |
6147 | struct mbuf *m_new = NULL, *m_final = NULL; | |
6148 | int progress = 0, length, pktlen; | |
6149 | ||
6150 | if (!(m0->m_flags & M_PKTHDR)) | |
6151 | return (m0); | |
6152 | ||
6153 | VERIFY(off < MHLEN); | |
6154 | m_fixhdr(m0); /* Needed sanity check */ | |
6155 | ||
6156 | pktlen = m0->m_pkthdr.len + off; | |
6157 | if (pktlen > MHLEN) | |
6158 | m_final = m_getcl(how, MT_DATA, M_PKTHDR); | |
6159 | else | |
6160 | m_final = m_gethdr(how, MT_DATA); | |
6161 | ||
6162 | if (m_final == NULL) | |
6163 | goto nospace; | |
6164 | ||
6165 | if (off > 0) { | |
6166 | pktlen -= off; | |
316670eb A |
6167 | m_final->m_data += off; |
6168 | } | |
6169 | ||
6170 | /* | |
6171 | * Caller must have handled the contents pointed to by this | |
6172 | * pointer before coming here, as otherwise it will point to | |
6173 | * the original mbuf which will get freed upon success. | |
6174 | */ | |
39236c6e | 6175 | VERIFY(m0->m_pkthdr.pkt_hdr == NULL); |
316670eb A |
6176 | |
6177 | if (m_dup_pkthdr(m_final, m0, how) == 0) | |
6178 | goto nospace; | |
6179 | ||
6180 | m_new = m_final; | |
6181 | ||
6182 | while (progress < pktlen) { | |
6183 | length = pktlen - progress; | |
6184 | if (length > MCLBYTES) | |
6185 | length = MCLBYTES; | |
39236c6e | 6186 | length -= ((m_new == m_final) ? off : 0); |
316670eb A |
6187 | |
6188 | if (m_new == NULL) { | |
6189 | if (length > MLEN) | |
6190 | m_new = m_getcl(how, MT_DATA, 0); | |
6191 | else | |
6192 | m_new = m_get(how, MT_DATA); | |
6193 | if (m_new == NULL) | |
6194 | goto nospace; | |
6195 | } | |
6196 | ||
6197 | m_copydata(m0, progress, length, mtod(m_new, caddr_t)); | |
6198 | progress += length; | |
6199 | m_new->m_len = length; | |
6200 | if (m_new != m_final) | |
6201 | m_cat(m_final, m_new); | |
6202 | m_new = NULL; | |
6203 | } | |
6204 | m_freem(m0); | |
6205 | m0 = m_final; | |
6206 | return (m0); | |
6207 | nospace: | |
6208 | if (m_final) | |
6209 | m_freem(m_final); | |
6210 | return (NULL); | |
6211 | } | |
6212 | ||
6213 | struct mbuf * | |
6214 | m_defrag(struct mbuf *m0, int how) | |
6215 | { | |
6216 | return (m_defrag_offset(m0, 0, how)); | |
6217 | } | |
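/*
 * Usage note (sketch, not part of the original source): on failure the
 * original chain is left untouched, so keep the old pointer until
 * m_defrag() succeeds:
 *
 *	struct mbuf *n = m_defrag(m, M_DONTWAIT);
 *	if (n != NULL)
 *		m = n;		... old chain was freed by m_defrag() ...
 *	else
 *		... m is still valid; handle the failure ...
 */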
6218 | ||
9bccf70c A |
6219 | void |
6220 | m_mchtype(struct mbuf *m, int t) | |
6221 | { | |
2d21ac55 A |
6222 | mtype_stat_inc(t); |
6223 | mtype_stat_dec(m->m_type); | |
6224 | (m)->m_type = t; | |
9bccf70c A |
6225 | } |
6226 | ||
2d21ac55 A |
6227 | void * |
6228 | m_mtod(struct mbuf *m) | |
9bccf70c | 6229 | { |
2d21ac55 | 6230 | return (MTOD(m, void *)); |
9bccf70c A |
6231 | } |
6232 | ||
2d21ac55 A |
6233 | struct mbuf * |
6234 | m_dtom(void *x) | |
9bccf70c | 6235 | { |
b0d623f7 | 6236 | return ((struct mbuf *)((uintptr_t)(x) & ~(MSIZE-1))); |
9bccf70c A |
6237 | } |
6238 | ||
2d21ac55 A |
6239 | void |
6240 | m_mcheck(struct mbuf *m) | |
9bccf70c | 6241 | { |
2d21ac55 | 6242 | _MCHECK(m); |
9bccf70c A |
6243 | } |
6244 | ||
6d2010ae A |
6245 | /* |
6246 | * Return a pointer to mbuf/offset of location in mbuf chain. | |
6247 | */ | |
6248 | struct mbuf * | |
6249 | m_getptr(struct mbuf *m, int loc, int *off) | |
6250 | { | |
6251 | ||
6252 | while (loc >= 0) { | |
6253 | /* Normal end of search. */ | |
6254 | if (m->m_len > loc) { | |
6255 | *off = loc; | |
6256 | return (m); | |
6257 | } else { | |
6258 | loc -= m->m_len; | |
6259 | if (m->m_next == NULL) { | |
6260 | if (loc == 0) { | |
6261 | /* Point at the end of valid data. */ | |
6262 | *off = m->m_len; | |
6263 | return (m); | |
6264 | } | |
6265 | return (NULL); | |
6266 | } | |
6267 | m = m->m_next; | |
6268 | } | |
6269 | } | |
6270 | return (NULL); | |
6271 | } | |
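/*
 * Usage note (sketch, not part of the original source): m_getptr()
 * turns an absolute byte position in a chain into an (mbuf, offset)
 * pair, e.g. to locate a field at byte `loc':
 *
 *	int off;
 *	struct mbuf *n = m_getptr(m, loc, &off);
 *	if (n != NULL)
 *		... the field starts at mtod(n, caddr_t) + off ...
 */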
6272 | ||
2d21ac55 A |
6273 | /* |
6274 | * Inform the corresponding mcache(s) that there's a waiter below. | |
6275 | */ | |
6276 | static void | |
6277 | mbuf_waiter_inc(mbuf_class_t class, boolean_t comp) | |
9bccf70c | 6278 | { |
2d21ac55 A |
6279 | mcache_waiter_inc(m_cache(class)); |
6280 | if (comp) { | |
6281 | if (class == MC_CL) { | |
6282 | mcache_waiter_inc(m_cache(MC_MBUF_CL)); | |
6283 | } else if (class == MC_BIGCL) { | |
6284 | mcache_waiter_inc(m_cache(MC_MBUF_BIGCL)); | |
6285 | } else if (class == MC_16KCL) { | |
6286 | mcache_waiter_inc(m_cache(MC_MBUF_16KCL)); | |
6287 | } else { | |
6288 | mcache_waiter_inc(m_cache(MC_MBUF_CL)); | |
6289 | mcache_waiter_inc(m_cache(MC_MBUF_BIGCL)); | |
6290 | } | |
6291 | } | |
9bccf70c A |
6292 | } |
6293 | ||
2d21ac55 A |
6294 | /* |
6295 | * Inform the corresponding mcache(s) that there's no more waiter below. | |
6296 | */ | |
6297 | static void | |
6298 | mbuf_waiter_dec(mbuf_class_t class, boolean_t comp) | |
6299 | { | |
6300 | mcache_waiter_dec(m_cache(class)); | |
6301 | if (comp) { | |
6302 | if (class == MC_CL) { | |
6303 | mcache_waiter_dec(m_cache(MC_MBUF_CL)); | |
6304 | } else if (class == MC_BIGCL) { | |
6305 | mcache_waiter_dec(m_cache(MC_MBUF_BIGCL)); | |
6306 | } else if (class == MC_16KCL) { | |
6307 | mcache_waiter_dec(m_cache(MC_MBUF_16KCL)); | |
6308 | } else { | |
6309 | mcache_waiter_dec(m_cache(MC_MBUF_CL)); | |
6310 | mcache_waiter_dec(m_cache(MC_MBUF_BIGCL)); | |
6311 | } | |
6312 | } | |
6313 | } | |
9bccf70c | 6314 | |
6d2010ae A |
6315 | /* |
6316 | * Called during slab (blocking and non-blocking) allocation. If there | |
6317 | * is at least one waiter, and the time since the first waiter is blocked | |
6318 | * is greater than the watchdog timeout, panic the system. | |
6319 | */ | |
6320 | static void | |
6321 | mbuf_watchdog(void) | |
6322 | { | |
6323 | struct timeval now; | |
6324 | unsigned int since; | |
6325 | ||
6326 | if (mb_waiters == 0 || !mb_watchdog) | |
6327 | return; | |
6328 | ||
6329 | microuptime(&now); | |
6330 | since = now.tv_sec - mb_wdtstart.tv_sec; | |
6331 | if (since >= MB_WDT_MAXTIME) { | |
6332 | panic_plain("%s: %d waiters stuck for %u secs\n%s", __func__, | |
6333 | mb_waiters, since, mbuf_dump()); | |
6334 | /* NOTREACHED */ | |
6335 | } | |
6336 | } | |
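/*
 * Illustrative sketch (not part of the original source): the check
 * mbuf_watchdog() performs, in isolation -- compare the monotonic
 * uptime elapsed since the first waiter blocked against a limit:
 */
#if 0	/* example only */
static boolean_t
ex_watchdog_expired(const struct timeval *start, unsigned int max_secs)
{
	struct timeval now;

	microuptime(&now);	/* monotonic uptime, as used above */
	return ((unsigned int)(now.tv_sec - start->tv_sec) >= max_secs);
}
#endif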
6337 | ||
2d21ac55 A |
6338 | /* |
6339 | * Called during blocking allocation. Returns TRUE if one or more objects | |
6340 | * are available at the per-CPU caches layer and that allocation should be | |
6341 | * retried at that level. | |
6342 | */ | |
6343 | static boolean_t | |
6344 | mbuf_sleep(mbuf_class_t class, unsigned int num, int wait) | |
9bccf70c | 6345 | { |
2d21ac55 A |
6346 | boolean_t mcache_retry = FALSE; |
6347 | ||
6348 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
6349 | ||
6350 | /* Check if there's anything at the cache layer */ | |
6351 | if (mbuf_cached_above(class, wait)) { | |
6352 | mcache_retry = TRUE; | |
6353 | goto done; | |
6354 | } | |
6355 | ||
6356 | /* Nothing? Then try hard to get it from somewhere */ | |
6357 | m_reclaim(class, num, (wait & MCR_COMP)); | |
6358 | ||
6359 | /* We tried hard and got something? */ | |
6360 | if (m_infree(class) > 0) { | |
6361 | mbstat.m_wait++; | |
6362 | goto done; | |
6363 | } else if (mbuf_cached_above(class, wait)) { | |
6364 | mbstat.m_wait++; | |
6365 | mcache_retry = TRUE; | |
6366 | goto done; | |
6367 | } else if (wait & MCR_TRYHARD) { | |
6368 | mcache_retry = TRUE; | |
6369 | goto done; | |
6370 | } | |
6371 | ||
6372 | /* | |
6373 | * There's really nothing for us right now; inform the | |
6374 | * cache(s) that there is a waiter below and go to sleep. | |
6375 | */ | |
6376 | mbuf_waiter_inc(class, (wait & MCR_COMP)); | |
6377 | ||
6378 | VERIFY(!(wait & MCR_NOSLEEP)); | |
6d2010ae A |
6379 | |
6380 | /* | |
6381 | * If this is the first waiter, arm the watchdog timer. Otherwise | |
6382 | * check if we need to panic the system due to watchdog timeout. | |
6383 | */ | |
6384 | if (mb_waiters == 0) | |
6385 | microuptime(&mb_wdtstart); | |
6386 | else | |
6387 | mbuf_watchdog(); | |
6388 | ||
2d21ac55 A |
6389 | mb_waiters++; |
6390 | (void) msleep(mb_waitchan, mbuf_mlock, (PZERO-1), m_cname(class), NULL); | |
6391 | ||
6392 | /* We are now up; stop getting notified until next round */ | |
6393 | mbuf_waiter_dec(class, (wait & MCR_COMP)); | |
6394 | ||
6395 | /* We waited and got something */ | |
6396 | if (m_infree(class) > 0) { | |
6397 | mbstat.m_wait++; | |
6398 | goto done; | |
6399 | } else if (mbuf_cached_above(class, wait)) { | |
6400 | mbstat.m_wait++; | |
6401 | mcache_retry = TRUE; | |
6402 | } | |
6403 | done: | |
6404 | return (mcache_retry); | |
9bccf70c A |
6405 | } |
6406 | ||
39037602 | 6407 | __attribute__((noreturn)) |
91447636 | 6408 | static void |
2d21ac55 | 6409 | mbuf_worker_thread(void) |
1c79356b | 6410 | { |
2d21ac55 A |
6411 | int mbuf_expand; |
6412 | ||
91447636 | 6413 | while (1) { |
2d21ac55 | 6414 | lck_mtx_lock(mbuf_mlock); |
2d21ac55 | 6415 | mbuf_expand = 0; |
91447636 A |
6416 | if (mbuf_expand_mcl) { |
6417 | int n; | |
2d21ac55 A |
6418 | |
6419 | /* Adjust to the current number of clusters in use */ | |
6420 | n = mbuf_expand_mcl - | |
6421 | (m_total(MC_CL) - m_infree(MC_CL)); | |
6422 | if ((n + m_total(MC_CL)) > m_maxlimit(MC_CL)) | |
6423 | n = m_maxlimit(MC_CL) - m_total(MC_CL); | |
91447636 | 6424 | mbuf_expand_mcl = 0; |
2d21ac55 A |
6425 | |
6426 | if (n > 0 && freelist_populate(MC_CL, n, M_WAIT) > 0) | |
6427 | mbuf_expand++; | |
91447636 A |
6428 | } |
6429 | if (mbuf_expand_big) { | |
6430 | int n; | |
2d21ac55 A |
6431 | |
6432 | /* Adjust to the current number of 4 KB clusters in use */ | |
6433 | n = mbuf_expand_big - | |
6434 | (m_total(MC_BIGCL) - m_infree(MC_BIGCL)); | |
6435 | if ((n + m_total(MC_BIGCL)) > m_maxlimit(MC_BIGCL)) | |
6436 | n = m_maxlimit(MC_BIGCL) - m_total(MC_BIGCL); | |
91447636 | 6437 | mbuf_expand_big = 0; |
2d21ac55 A |
6438 | |
6439 | if (n > 0 && freelist_populate(MC_BIGCL, n, M_WAIT) > 0) | |
6440 | mbuf_expand++; | |
6441 | } | |
6442 | if (mbuf_expand_16k) { | |
6443 | int n; | |
6444 | ||
6445 | /* Adjust to the current number of 16 KB clusters in use */ | |
6446 | n = mbuf_expand_16k - | |
6447 | (m_total(MC_16KCL) - m_infree(MC_16KCL)); | |
6448 | if ((n + m_total(MC_16KCL)) > m_maxlimit(MC_16KCL)) | |
6449 | n = m_maxlimit(MC_16KCL) - m_total(MC_16KCL); | |
6450 | mbuf_expand_16k = 0; | |
6451 | ||
6452 | if (n > 0) | |
6453 | (void) freelist_populate(MC_16KCL, n, M_WAIT); | |
6454 | } | |
6455 | ||
6456 | /* | |
6457 | * Because we can run out of memory before filling the mbuf | |
6458 | * map, we should not allocate more clusters than there are | |
6459 | * mbufs -- otherwise we could have a large number of useless | |
6460 | * clusters allocated. | |
91447636 | 6461 | */ |
2d21ac55 A |
6462 | if (mbuf_expand) { |
6463 | while (m_total(MC_MBUF) < | |
6464 | (m_total(MC_BIGCL) + m_total(MC_CL))) { | |
6465 | if (freelist_populate(MC_MBUF, 1, M_WAIT) == 0) | |
6466 | break; | |
6467 | } | |
91447636 | 6468 | } |
2d21ac55 | 6469 | |
39037602 A |
6470 | mbuf_worker_needs_wakeup = TRUE; |
6471 | assert_wait((caddr_t)&mbuf_worker_needs_wakeup, | |
6472 | THREAD_UNINT); | |
2d21ac55 | 6473 | lck_mtx_unlock(mbuf_mlock); |
2d21ac55 | 6474 | (void) thread_block((thread_continue_t)mbuf_worker_thread); |
91447636 | 6475 | } |
1c79356b A |
6476 | } |
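/*
 * Illustrative sketch (not part of the original source): the clamp
 * applied to each expansion request above.  The request is first
 * reduced by the buffers already in use, then capped so the class
 * never exceeds its configured limit:
 */
#if 0	/* example only */
static int
ex_clamped_growth(int requested, int total, int infree, int maxlimit)
{
	int n = requested - (total - infree);	/* discount in-use buffers */

	if ((n + total) > maxlimit)
		n = maxlimit - total;		/* respect the class limit */
	return (n);				/* <= 0 means nothing to do */
}
#endif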
6477 | ||
39037602 | 6478 | __attribute__((noreturn)) |
91447636 | 6479 | static void |
2d21ac55 | 6480 | mbuf_worker_thread_init(void) |
55e303ae | 6481 | { |
2d21ac55 A |
6482 | mbuf_worker_ready++; |
6483 | mbuf_worker_thread(); | |
55e303ae | 6484 | } |
1c79356b | 6485 | |
2d21ac55 A |
6486 | static mcl_slab_t * |
6487 | slab_get(void *buf) | |
6488 | { | |
6489 | mcl_slabg_t *slg; | |
6490 | unsigned int ix, k; | |
6491 | ||
6492 | lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED); | |
6493 | ||
6494 | VERIFY(MBUF_IN_MAP(buf)); | |
3e170ce0 | 6495 | ix = ((unsigned char *)buf - mbutl) >> MBSHIFT; |
2d21ac55 A |
6496 | VERIFY(ix < maxslabgrp); |
6497 | ||
6498 | if ((slg = slabstbl[ix]) == NULL) { | |
6499 | /* | |
39037602 | 6500 | * In the current implementation, we never shrink the slabs |
fe8ab488 A |
6501 | * table; if we attempt to reallocate a cluster group when |
6502 | * it's already allocated, panic since this is a sign of a | |
6503 | * memory corruption (slabstbl[ix] got nullified). | |
2d21ac55 A |
6504 | */ |
6505 | ++slabgrp; | |
6506 | VERIFY(ix < slabgrp); | |
6507 | /* | |
6508 | * Slabs expansion can only be done single threaded; when | |
6509 | * we get here, it must be as a result of m_clalloc() which | |
6510 | * is serialized and therefore mb_clalloc_busy must be set. | |
6511 | */ | |
6512 | VERIFY(mb_clalloc_busy); | |
6513 | lck_mtx_unlock(mbuf_mlock); | |
6514 | ||
6515 | /* This is a new buffer; create the slabs group for it */ | |
6516 | MALLOC(slg, mcl_slabg_t *, sizeof (*slg), M_TEMP, | |
6517 | M_WAITOK | M_ZERO); | |
3e170ce0 A |
6518 | MALLOC(slg->slg_slab, mcl_slab_t *, sizeof(mcl_slab_t) * NSLABSPMB, |
6519 | M_TEMP, M_WAITOK | M_ZERO); | |
6520 | VERIFY(slg != NULL && slg->slg_slab != NULL); | |
2d21ac55 A |
6521 | |
6522 | lck_mtx_lock(mbuf_mlock); | |
6523 | /* | |
6524 | * No other thread could have gone into m_clalloc() after | |
6525 | * we dropped the lock above, so verify that it's true. | |
6526 | */ | |
6527 | VERIFY(mb_clalloc_busy); | |
6528 | ||
6529 | slabstbl[ix] = slg; | |
6530 | ||
6531 | /* Chain each slab in the group to its forward neighbor */ | |
6532 | for (k = 1; k < NSLABSPMB; k++) | |
6533 | slg->slg_slab[k - 1].sl_next = &slg->slg_slab[k]; | |
6534 | VERIFY(slg->slg_slab[NSLABSPMB - 1].sl_next == NULL); | |
6535 | ||
6536 | /* And chain the last slab in the previous group to this */ | |
6537 | if (ix > 0) { | |
6538 | VERIFY(slabstbl[ix - 1]-> | |
6539 | slg_slab[NSLABSPMB - 1].sl_next == NULL); | |
6540 | slabstbl[ix - 1]->slg_slab[NSLABSPMB - 1].sl_next = | |
6541 | &slg->slg_slab[0]; | |
6542 | } | |
6543 | } | |
6544 | ||
3e170ce0 | 6545 | ix = MTOPG(buf) % NSLABSPMB; |
2d21ac55 A |
6546 | VERIFY(ix < NSLABSPMB); |
6547 | ||
6548 | return (&slg->slg_slab[ix]); | |
6549 | } | |
6550 | ||
6551 | static void | |
6552 | slab_init(mcl_slab_t *sp, mbuf_class_t class, u_int32_t flags, | |
6553 | void *base, void *head, unsigned int len, int refcnt, int chunks) | |
6554 | { | |
6555 | sp->sl_class = class; | |
6556 | sp->sl_flags = flags; | |
6557 | sp->sl_base = base; | |
6558 | sp->sl_head = head; | |
6559 | sp->sl_len = len; | |
6560 | sp->sl_refcnt = refcnt; | |
6561 | sp->sl_chunks = chunks; | |
6562 | slab_detach(sp); | |
6563 | } | |
6564 | ||
6565 | static void | |
6566 | slab_insert(mcl_slab_t *sp, mbuf_class_t class) | |
6567 | { | |
6568 | VERIFY(slab_is_detached(sp)); | |
6569 | m_slab_cnt(class)++; | |
6570 | TAILQ_INSERT_TAIL(&m_slablist(class), sp, sl_link); | |
6571 | sp->sl_flags &= ~SLF_DETACHED; | |
3e170ce0 A |
6572 | |
6573 | /* | |
6574 | * If a buffer spans multiple contiguous pages then mark them as | |
6575 | * detached too | |
6576 | */ | |
6d2010ae | 6577 | if (class == MC_16KCL) { |
2d21ac55 | 6578 | int k; |
6d2010ae | 6579 | for (k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
6580 | sp = sp->sl_next; |
6581 | /* Next slab must already be present */ | |
3e170ce0 | 6582 | VERIFY(sp != NULL && slab_is_detached(sp)); |
2d21ac55 A |
6583 | sp->sl_flags &= ~SLF_DETACHED; |
6584 | } | |
6585 | } | |
6586 | } | |
6587 | ||
6588 | static void | |
6589 | slab_remove(mcl_slab_t *sp, mbuf_class_t class) | |
6590 | { | |
3e170ce0 | 6591 | int k; |
2d21ac55 A |
6592 | VERIFY(!slab_is_detached(sp)); |
6593 | VERIFY(m_slab_cnt(class) > 0); | |
6594 | m_slab_cnt(class)--; | |
6595 | TAILQ_REMOVE(&m_slablist(class), sp, sl_link); | |
6596 | slab_detach(sp); | |
6d2010ae | 6597 | if (class == MC_16KCL) { |
6d2010ae | 6598 | for (k = 1; k < NSLABSP16KB; k++) { |
2d21ac55 A |
6599 | sp = sp->sl_next; |
6600 | /* Next slab must already be present */ | |
6601 | VERIFY(sp != NULL); | |
6602 | VERIFY(!slab_is_detached(sp)); | |
6603 | slab_detach(sp); | |
6604 | } | |
6605 | } | |
6606 | } | |
6607 | ||
6608 | static boolean_t | |
6609 | slab_inrange(mcl_slab_t *sp, void *buf) | |
6610 | { | |
6611 | return ((uintptr_t)buf >= (uintptr_t)sp->sl_base && | |
6612 | (uintptr_t)buf < ((uintptr_t)sp->sl_base + sp->sl_len)); | |
6613 | } | |
6614 | ||
b0d623f7 | 6615 | #undef panic |
2d21ac55 A |
6616 | |
6617 | static void | |
6618 | slab_nextptr_panic(mcl_slab_t *sp, void *addr) | |
6619 | { | |
6620 | int i; | |
6621 | unsigned int chunk_len = sp->sl_len / sp->sl_chunks; | |
6622 | uintptr_t buf = (uintptr_t)sp->sl_base; | |
6623 | ||
6624 | for (i = 0; i < sp->sl_chunks; i++, buf += chunk_len) { | |
6625 | void *next = ((mcache_obj_t *)buf)->obj_next; | |
6626 | if (next != addr) | |
6627 | continue; | |
6d2010ae | 6628 | if (!mclverify) { |
2d21ac55 A |
6629 | if (next != NULL && !MBUF_IN_MAP(next)) { |
6630 | mcache_t *cp = m_cache(sp->sl_class); | |
6631 | panic("%s: %s buffer %p in slab %p modified " | |
6632 | "after free at offset 0: %p out of range " | |
6633 | "[%p-%p)\n", __func__, cp->mc_name, | |
6634 | (void *)buf, sp, next, mbutl, embutl); | |
6635 | /* NOTREACHED */ | |
6636 | } | |
6637 | } else { | |
6638 | mcache_audit_t *mca = mcl_audit_buf2mca(sp->sl_class, | |
6639 | (mcache_obj_t *)buf); | |
6640 | mcl_audit_verify_nextptr(next, mca); | |
6641 | } | |
6642 | } | |
6643 | } | |
6644 | ||
6645 | static void | |
6646 | slab_detach(mcl_slab_t *sp) | |
6647 | { | |
6648 | sp->sl_link.tqe_next = (mcl_slab_t *)-1; | |
6649 | sp->sl_link.tqe_prev = (mcl_slab_t **)-1; | |
6650 | sp->sl_flags |= SLF_DETACHED; | |
6651 | } | |
6652 | ||
6653 | static boolean_t | |
6654 | slab_is_detached(mcl_slab_t *sp) | |
6655 | { | |
6656 | return ((intptr_t)sp->sl_link.tqe_next == -1 && | |
6657 | (intptr_t)sp->sl_link.tqe_prev == -1 && | |
6658 | (sp->sl_flags & SLF_DETACHED)); | |
6659 | } | |
6660 | ||
6661 | static void | |
6662 | mcl_audit_init(void *buf, mcache_audit_t **mca_list, | |
6663 | mcache_obj_t **con_list, size_t con_size, unsigned int num) | |
6664 | { | |
6665 | mcache_audit_t *mca, *mca_tail; | |
6666 | mcache_obj_t *con = NULL; | |
6667 | boolean_t save_contents = (con_list != NULL); | |
6668 | unsigned int i, ix; | |
6669 | ||
3e170ce0 | 6670 | ASSERT(num <= NMBPG); |
2d21ac55 A |
6671 | ASSERT(con_list == NULL || con_size != 0); |
6672 | ||
3e170ce0 | 6673 | ix = MTOPG(buf); |
6d2010ae A |
6674 | VERIFY(ix < maxclaudit); |
6675 | ||
2d21ac55 | 6676 | /* Make sure we haven't been here before */ |
3e170ce0 | 6677 | for (i = 0; i < NMBPG; i++) |
2d21ac55 A |
6678 | VERIFY(mclaudit[ix].cl_audit[i] == NULL); |
6679 | ||
6680 | mca = mca_tail = *mca_list; | |
6681 | if (save_contents) | |
6682 | con = *con_list; | |
6683 | ||
6684 | for (i = 0; i < num; i++) { | |
6685 | mcache_audit_t *next; | |
6686 | ||
6687 | next = mca->mca_next; | |
6688 | bzero(mca, sizeof (*mca)); | |
6689 | mca->mca_next = next; | |
6690 | mclaudit[ix].cl_audit[i] = mca; | |
6691 | ||
6692 | /* Attach the contents buffer if requested */ | |
6693 | if (save_contents) { | |
39236c6e A |
6694 | mcl_saved_contents_t *msc = |
6695 | (mcl_saved_contents_t *)(void *)con; | |
6696 | ||
6697 | VERIFY(msc != NULL); | |
6698 | VERIFY(IS_P2ALIGNED(msc, sizeof (u_int64_t))); | |
6699 | VERIFY(con_size == sizeof (*msc)); | |
2d21ac55 | 6700 | mca->mca_contents_size = con_size; |
39236c6e | 6701 | mca->mca_contents = msc; |
2d21ac55 A |
6702 | con = con->obj_next; |
6703 | bzero(mca->mca_contents, mca->mca_contents_size); | |
6704 | } | |
6705 | ||
6706 | mca_tail = mca; | |
6707 | mca = mca->mca_next; | |
6708 | } | |
91447636 | 6709 | |
2d21ac55 A |
6710 | if (save_contents) |
6711 | *con_list = con; | |
6712 | ||
6713 | *mca_list = mca_tail->mca_next; | |
6714 | mca_tail->mca_next = NULL; | |
6715 | } | |
6716 | ||
fe8ab488 A |
6717 | static void |
6718 | mcl_audit_free(void *buf, unsigned int num) | |
6719 | { | |
6720 | unsigned int i, ix; | |
6721 | mcache_audit_t *mca, *mca_list; | |
6722 | ||
3e170ce0 | 6723 | ix = MTOPG(buf); |
fe8ab488 | 6724 | VERIFY(ix < maxclaudit); |
39037602 | 6725 | |
fe8ab488 A |
6726 | if (mclaudit[ix].cl_audit[0] != NULL) { |
6727 | mca_list = mclaudit[ix].cl_audit[0]; | |
6728 | for (i = 0; i < num; i++) { | |
6729 | mca = mclaudit[ix].cl_audit[i]; | |
6730 | mclaudit[ix].cl_audit[i] = NULL; | |
6731 | if (mca->mca_contents) | |
6732 | mcache_free(mcl_audit_con_cache, | |
6733 | mca->mca_contents); | |
6734 | } | |
6735 | mcache_free_ext(mcache_audit_cache, | |
6736 | (mcache_obj_t *)mca_list); | |
6737 | } | |
6738 | } | |
6739 | ||
2d21ac55 | 6740 | /* |
6d2010ae | 6741 | * Given an address of a buffer (mbuf/2KB/4KB/16KB), return |
2d21ac55 A |
6742 | * the corresponding audit structure for that buffer. |
6743 | */ | |
6744 | static mcache_audit_t * | |
3e170ce0 | 6745 | mcl_audit_buf2mca(mbuf_class_t class, mcache_obj_t *mobj) |
2d21ac55 A |
6746 | { |
6747 | mcache_audit_t *mca = NULL; | |
3e170ce0 A |
6748 | int ix = MTOPG(mobj), m_idx = 0; |
6749 | unsigned char *page_addr; | |
2d21ac55 | 6750 | |
6d2010ae | 6751 | VERIFY(ix < maxclaudit); |
3e170ce0 A |
6752 | VERIFY(IS_P2ALIGNED(mobj, MIN(m_maxsize(class), PAGE_SIZE))); |
6753 | ||
6754 | page_addr = PGTOM(ix); | |
2d21ac55 A |
6755 | |
6756 | switch (class) { | |
6757 | case MC_MBUF: | |
6758 | /* | |
6d2010ae | 6759 | * For the mbuf case, find the index of the page |
2d21ac55 | 6760 | * used by the mbuf and use that index to locate the |
6d2010ae A |
6761 | * base address of the page. Then find out the |
6762 | * mbuf index relative to the page base and use | |
2d21ac55 A |
6763 | * it to locate the audit structure. |
6764 | */ | |
3e170ce0 A |
6765 | m_idx = MBPAGEIDX(page_addr, mobj); |
6766 | VERIFY(m_idx < (int)NMBPG); | |
6767 | mca = mclaudit[ix].cl_audit[m_idx]; | |
2d21ac55 A |
6768 | break; |
6769 | ||
6770 | case MC_CL: | |
6d2010ae A |
6771 | /* |
6772 | * Same thing as above, but for 2KB clusters in a page. | |
6773 | */ | |
3e170ce0 A |
6774 | m_idx = CLPAGEIDX(page_addr, mobj); |
6775 | VERIFY(m_idx < (int)NCLPG); | |
6776 | mca = mclaudit[ix].cl_audit[m_idx]; | |
6d2010ae A |
6777 | break; |
6778 | ||
2d21ac55 | 6779 | case MC_BIGCL: |
3e170ce0 A |
6780 | m_idx = BCLPAGEIDX(page_addr, mobj); |
6781 | VERIFY(m_idx < (int)NBCLPG); | |
6782 | mca = mclaudit[ix].cl_audit[m_idx]; | |
6783 | break; | |
2d21ac55 A |
6784 | case MC_16KCL: |
6785 | /* | |
6786 | * Same as above, but only return the first element. | |
6787 | */ | |
6788 | mca = mclaudit[ix].cl_audit[0]; | |
6789 | break; | |
6790 | ||
6791 | default: | |
6792 | VERIFY(0); | |
6793 | /* NOTREACHED */ | |
6794 | } | |
6795 | ||
6796 | return (mca); | |
6797 | } | |
6798 | ||
6799 | static void | |
6800 | mcl_audit_mbuf(mcache_audit_t *mca, void *addr, boolean_t composite, | |
6801 | boolean_t alloc) | |
6802 | { | |
6803 | struct mbuf *m = addr; | |
6804 | mcache_obj_t *next = ((mcache_obj_t *)m)->obj_next; | |
6805 | ||
6806 | VERIFY(mca->mca_contents != NULL && | |
6807 | mca->mca_contents_size == AUDIT_CONTENTS_SIZE); | |
6808 | ||
6d2010ae A |
6809 | if (mclverify) |
6810 | mcl_audit_verify_nextptr(next, mca); | |
2d21ac55 A |
6811 | |
6812 | if (!alloc) { | |
6813 | /* Save constructed mbuf fields */ | |
6814 | mcl_audit_save_mbuf(m, mca); | |
6d2010ae A |
6815 | if (mclverify) { |
6816 | mcache_set_pattern(MCACHE_FREE_PATTERN, m, | |
6817 | m_maxsize(MC_MBUF)); | |
6818 | } | |
2d21ac55 A |
6819 | ((mcache_obj_t *)m)->obj_next = next; |
6820 | return; | |
6821 | } | |
6822 | ||
6823 | /* Check if the buffer has been corrupted while in freelist */ | |
6d2010ae A |
6824 | if (mclverify) { |
6825 | mcache_audit_free_verify_set(mca, addr, 0, m_maxsize(MC_MBUF)); | |
6826 | } | |
2d21ac55 A |
6827 | /* Restore constructed mbuf fields */ |
6828 | mcl_audit_restore_mbuf(m, mca, composite); | |
6829 | } | |
6830 | ||
6831 | static void | |
6832 | mcl_audit_restore_mbuf(struct mbuf *m, mcache_audit_t *mca, boolean_t composite) | |
6833 | { | |
39236c6e | 6834 | struct mbuf *ms = MCA_SAVED_MBUF_PTR(mca); |
2d21ac55 A |
6835 | |
6836 | if (composite) { | |
6837 | struct mbuf *next = m->m_next; | |
6838 | VERIFY(ms->m_flags == M_EXT && MEXT_RFA(ms) != NULL && | |
6839 | MBUF_IS_COMPOSITE(ms)); | |
39236c6e | 6840 | VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE); |
2d21ac55 A |
6841 | /* |
6842 | * We could have hand-picked the mbuf fields and restore | |
6843 | * them individually, but that will be a maintenance | |
6844 | * headache. Instead, restore everything that was saved; | |
6845 | * the mbuf layer will recheck and reinitialize anyway. | |
6846 | */ | |
39236c6e | 6847 | bcopy(ms, m, MCA_SAVED_MBUF_SIZE); |
2d21ac55 A |
6848 | m->m_next = next; |
6849 | } else { | |
6850 | /* | |
6851 | * For a regular mbuf (no cluster attached) there's nothing | |
6852 | * to restore other than the type field, which is expected | |
6853 | * to be MT_FREE. | |
6854 | */ | |
6855 | m->m_type = ms->m_type; | |
6856 | } | |
6857 | _MCHECK(m); | |
6858 | } | |
6859 | ||
6860 | static void | |
6861 | mcl_audit_save_mbuf(struct mbuf *m, mcache_audit_t *mca) | |
6862 | { | |
39236c6e | 6863 | VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE); |
2d21ac55 | 6864 | _MCHECK(m); |
39236c6e | 6865 | bcopy(m, MCA_SAVED_MBUF_PTR(mca), MCA_SAVED_MBUF_SIZE); |
2d21ac55 A |
6866 | } |
6867 | ||
6868 | static void | |
6869 | mcl_audit_cluster(mcache_audit_t *mca, void *addr, size_t size, boolean_t alloc, | |
6870 | boolean_t save_next) | |
6871 | { | |
6872 | mcache_obj_t *next = ((mcache_obj_t *)addr)->obj_next; | |
6873 | ||
6874 | if (!alloc) { | |
6d2010ae A |
6875 | if (mclverify) { |
6876 | mcache_set_pattern(MCACHE_FREE_PATTERN, addr, size); | |
6877 | } | |
2d21ac55 A |
6878 | if (save_next) { |
6879 | mcl_audit_verify_nextptr(next, mca); | |
6880 | ((mcache_obj_t *)addr)->obj_next = next; | |
6881 | } | |
6d2010ae | 6882 | } else if (mclverify) { |
2d21ac55 A |
6883 | /* Check if the buffer has been corrupted while in freelist */ |
6884 | mcl_audit_verify_nextptr(next, mca); | |
6885 | mcache_audit_free_verify_set(mca, addr, 0, size); | |
6886 | } | |
6887 | } | |
6888 | ||
39236c6e A |
6889 | static void |
6890 | mcl_audit_scratch(mcache_audit_t *mca) | |
6891 | { | |
6892 | void *stack[MCACHE_STACK_DEPTH + 1]; | |
6893 | mcl_scratch_audit_t *msa; | |
6894 | struct timeval now; | |
6895 | ||
6896 | VERIFY(mca->mca_contents != NULL); | |
6897 | msa = MCA_SAVED_SCRATCH_PTR(mca); | |
6898 | ||
6899 | msa->msa_pthread = msa->msa_thread; | |
6900 | msa->msa_thread = current_thread(); | |
6901 | bcopy(msa->msa_stack, msa->msa_pstack, sizeof (msa->msa_pstack)); | |
6902 | msa->msa_pdepth = msa->msa_depth; | |
6903 | bzero(stack, sizeof (stack)); | |
6904 | msa->msa_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1; | |
fe8ab488 | 6905 | bcopy(&stack[1], msa->msa_stack, sizeof (msa->msa_stack)); |
39236c6e A |
6906 | |
6907 | msa->msa_ptstamp = msa->msa_tstamp; | |
6908 | microuptime(&now); | |
6909 | /* tstamp is in ms relative to mb_start */ | |
6910 | msa->msa_tstamp = ((now.tv_usec - mb_start.tv_usec) / 1000); | |
6911 | if ((now.tv_sec - mb_start.tv_sec) > 0) | |
6912 | msa->msa_tstamp += ((now.tv_sec - mb_start.tv_sec) * 1000); | |
6913 | } | |
6914 | ||
2d21ac55 A |
6915 | static void |
6916 | mcl_audit_mcheck_panic(struct mbuf *m) | |
6917 | { | |
6918 | mcache_audit_t *mca; | |
6919 | ||
6920 | MRANGE(m); | |
6921 | mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); | |
6922 | ||
6923 | panic("mcl_audit: freed mbuf %p with type 0x%x (instead of 0x%x)\n%s\n", | |
6924 | m, (u_int16_t)m->m_type, MT_FREE, mcache_dump_mca(mca)); | |
6925 | /* NOTREACHED */ | |
6926 | } | |
6927 | ||
6928 | static void | |
6929 | mcl_audit_verify_nextptr(void *next, mcache_audit_t *mca) | |
6930 | { | |
6d2010ae A |
6931 | if (next != NULL && !MBUF_IN_MAP(next) && |
6932 | (next != (void *)MCACHE_FREE_PATTERN || !mclverify)) { | |
2d21ac55 A |
6933 | panic("mcl_audit: buffer %p modified after free at offset 0: " |
6934 | "%p out of range [%p-%p)\n%s\n", | |
6935 | mca->mca_addr, next, mbutl, embutl, mcache_dump_mca(mca)); | |
6936 | /* NOTREACHED */ | |
6937 | } | |
6938 | } | |
6939 | ||
6d2010ae A |
6940 | /* This function turns on mbuf leak detection */ |
6941 | static void | |
6942 | mleak_activate(void) | |
6943 | { | |
6944 | mleak_table.mleak_sample_factor = MLEAK_SAMPLE_FACTOR; | |
6945 | PE_parse_boot_argn("mleak_sample_factor", | |
6946 | &mleak_table.mleak_sample_factor, | |
6947 | sizeof (mleak_table.mleak_sample_factor)); | |
6948 | ||
6949 | if (mleak_table.mleak_sample_factor == 0) | |
6950 | mclfindleak = 0; | |
6951 | ||
6952 | if (mclfindleak == 0) | |
6953 | return; | |
6954 | ||
6955 | vm_size_t alloc_size = | |
6956 | mleak_alloc_buckets * sizeof (struct mallocation); | |
6957 | vm_size_t trace_size = mleak_trace_buckets * sizeof (struct mtrace); | |
6958 | ||
6959 | MALLOC(mleak_allocations, struct mallocation *, alloc_size, | |
6960 | M_TEMP, M_WAITOK | M_ZERO); | |
6961 | VERIFY(mleak_allocations != NULL); | |
6962 | ||
6963 | MALLOC(mleak_traces, struct mtrace *, trace_size, | |
6964 | M_TEMP, M_WAITOK | M_ZERO); | |
6965 | VERIFY(mleak_traces != NULL); | |
6966 | ||
6967 | MALLOC(mleak_stat, mleak_stat_t *, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES), | |
6968 | M_TEMP, M_WAITOK | M_ZERO); | |
6969 | VERIFY(mleak_stat != NULL); | |
6970 | mleak_stat->ml_cnt = MLEAK_NUM_TRACES; | |
6971 | #ifdef __LP64__ | |
6972 | mleak_stat->ml_isaddr64 = 1; | |
6973 | #endif /* __LP64__ */ | |
6974 | } | |
6975 | ||
6976 | static void | |
6977 | mleak_logger(u_int32_t num, mcache_obj_t *addr, boolean_t alloc) | |
6978 | { | |
6979 | int temp; | |
6980 | ||
6981 | if (mclfindleak == 0) | |
6982 | return; | |
6983 | ||
6984 | if (!alloc) | |
6985 | return (mleak_free(addr)); | |
6986 | ||
6987 | temp = atomic_add_32_ov(&mleak_table.mleak_capture, 1); | |
6988 | ||
6989 | if ((temp % mleak_table.mleak_sample_factor) == 0 && addr != NULL) { | |
6990 | uintptr_t bt[MLEAK_STACK_DEPTH]; | |
39037602 | 6991 | int logged = backtrace(bt, MLEAK_STACK_DEPTH); |
6d2010ae A |
6992 | mleak_log(bt, addr, logged, num); |
6993 | } | |
6994 | } | |
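/*
 * Illustrative sketch (not part of the original source): the sampling
 * gate above records only one of every mleak_sample_factor allocations;
 * atomic_add_32_ov() hands each caller a distinct counter value, so the
 * modulo test fires once per factor:
 */
#if 0	/* example only */
static boolean_t
ex_should_sample(u_int32_t *counter, u_int32_t factor)
{
	u_int32_t n = atomic_add_32_ov(counter, 1);

	return ((n % factor) == 0);
}
#endif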
6995 | ||
6996 | /* | |
6997 | * This function records the allocation in the mleak_allocations table | |
6998 | * and the backtrace in the mleak_traces table. If the allocation slot is | |
6999 | * in use, the old record is replaced; if the trace slot holds a different | |
7000 | * backtrace (hash collision) we bail out; the same trace bumps the refcount. | |
7001 | */ | |
7002 | static boolean_t | |
7003 | mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num) | |
7004 | { | |
7005 | struct mallocation *allocation; | |
7006 | struct mtrace *trace; | |
7007 | uint32_t trace_index; | |
6d2010ae A |
7008 | |
7009 | /* Quit if someone else modifying the tables */ | |
7010 | if (!lck_mtx_try_lock_spin(mleak_lock)) { | |
7011 | mleak_table.total_conflicts++; | |
7012 | return (FALSE); | |
7013 | } | |
7014 | ||
7015 | allocation = &mleak_allocations[hashaddr((uintptr_t)addr, | |
7016 | mleak_alloc_buckets)]; | |
7017 | trace_index = hashbacktrace(bt, depth, mleak_trace_buckets); | |
7018 | trace = &mleak_traces[trace_index]; | |
7019 | ||
7020 | VERIFY(allocation <= &mleak_allocations[mleak_alloc_buckets - 1]); | |
7021 | VERIFY(trace <= &mleak_traces[mleak_trace_buckets - 1]); | |
7022 | ||
7023 | allocation->hitcount++; | |
7024 | trace->hitcount++; | |
7025 | ||
7026 | /* | |
7027 | * If the allocation bucket we want is occupied | |
7028 | * and the occupier has the same trace, just bail. | |
7029 | */ | |
7030 | if (allocation->element != NULL && | |
7031 | trace_index == allocation->trace_index) { | |
7032 | mleak_table.alloc_collisions++; | |
7033 | lck_mtx_unlock(mleak_lock); | |
7034 | return (TRUE); | |
7035 | } | |
7036 | ||
7037 | /* | |
7038 | * Store the backtrace in the traces array; | |
7039 | * Size of zero = trace bucket is free. | |
7040 | */ | |
7041 | if (trace->allocs > 0 && | |
7042 | bcmp(trace->addr, bt, (depth * sizeof (uintptr_t))) != 0) { | |
7043 | /* Different, unique trace, but the same hash! Bail out. */ | |
7044 | trace->collisions++; | |
7045 | mleak_table.trace_collisions++; | |
7046 | lck_mtx_unlock(mleak_lock); | |
7047 | return (TRUE); | |
7048 | } else if (trace->allocs > 0) { | |
7049 | /* Same trace, already added, so increment refcount */ | |
7050 | trace->allocs++; | |
7051 | } else { | |
7052 | /* Found an unused trace bucket, so record the trace here */ | |
7053 | if (trace->depth != 0) { | |
7054 | /* this slot was previously used but is not currently in use */ | |
7055 | mleak_table.trace_overwrites++; | |
7056 | } | |
7057 | mleak_table.trace_recorded++; | |
7058 | trace->allocs = 1; | |
7059 | memcpy(trace->addr, bt, (depth * sizeof (uintptr_t))); | |
7060 | trace->depth = depth; | |
7061 | trace->collisions = 0; | |
7062 | } | |
7063 | ||
7064 | /* Step 2: Store the allocation record in the allocations array */ | |
7065 | if (allocation->element != NULL) { | |
7066 | /* | |
7067 | * Replace an existing allocation. No need to preserve | |
7068 | * because only a subset of the allocations are being | |
7069 | * recorded anyway. | |
7070 | */ | |
7071 | mleak_table.alloc_collisions++; | |
7072 | } else if (allocation->trace_index != 0) { | |
7073 | mleak_table.alloc_overwrites++; | |
7074 | } | |
7075 | allocation->element = addr; | |
7076 | allocation->trace_index = trace_index; | |
7077 | allocation->count = num; | |
7078 | mleak_table.alloc_recorded++; | |
7079 | mleak_table.outstanding_allocs++; | |
7080 | ||
6d2010ae A |
7081 | lck_mtx_unlock(mleak_lock); |
7082 | return (TRUE); | |
7083 | } | |
7084 | ||
7085 | static void | |
7086 | mleak_free(mcache_obj_t *addr) | |
7087 | { | |
7088 | while (addr != NULL) { | |
7089 | struct mallocation *allocation = &mleak_allocations | |
7090 | [hashaddr((uintptr_t)addr, mleak_alloc_buckets)]; | |
7091 | ||
7092 | if (allocation->element == addr && | |
7093 | allocation->trace_index < mleak_trace_buckets) { | |
7094 | lck_mtx_lock_spin(mleak_lock); | |
7095 | if (allocation->element == addr && | |
7096 | allocation->trace_index < mleak_trace_buckets) { | |
7097 | struct mtrace *trace; | |
7098 | trace = &mleak_traces[allocation->trace_index]; | |
7099 | /* allocs = 0 means trace bucket is unused */ | |
7100 | if (trace->allocs > 0) | |
7101 | trace->allocs--; | |
7102 | if (trace->allocs == 0) | |
7103 | trace->depth = 0; | |
7104 | /* NULL element means alloc bucket is unused */ | |
7105 | allocation->element = NULL; | |
7106 | mleak_table.outstanding_allocs--; | |
7107 | } | |
7108 | lck_mtx_unlock(mleak_lock); | |
7109 | } | |
7110 | addr = addr->obj_next; | |
7111 | } | |
7112 | } | |
7113 | ||
316670eb A |
7114 | static void |
7115 | mleak_sort_traces(void) | |
7116 | { | |
7117 | int i, j, k; | |
7118 | struct mtrace *swap; | |
7119 | ||
7120 | for (i = 0; i < MLEAK_NUM_TRACES; i++) | |
7121 | mleak_top_trace[i] = NULL; | |
7122 | ||
7123 | for (i = 0, j = 0; j < MLEAK_NUM_TRACES && i < mleak_trace_buckets; | |
7124 | i++) { | |
7125 | if (mleak_traces[i].allocs <= 0) | |
7126 | continue; | |
7127 | ||
7128 | mleak_top_trace[j] = &mleak_traces[i]; | |
7129 | for (k = j; k > 0; k--) { | |
7130 | if (mleak_top_trace[k]->allocs <= | |
7131 | mleak_top_trace[k-1]->allocs) | |
7132 | break; | |
7133 | ||
7134 | swap = mleak_top_trace[k-1]; | |
7135 | mleak_top_trace[k-1] = mleak_top_trace[k]; | |
7136 | mleak_top_trace[k] = swap; | |
7137 | } | |
7138 | j++; | |
7139 | } | |
7140 | ||
7141 | j--; | |
7142 | for (; i < mleak_trace_buckets; i++) { | |
7143 | if (mleak_traces[i].allocs <= mleak_top_trace[j]->allocs) | |
7144 | continue; | |
7145 | ||
7146 | mleak_top_trace[j] = &mleak_traces[i]; | |
7147 | ||
7148 | for (k = j; k > 0; k--) { | |
7149 | if (mleak_top_trace[k]->allocs <= | |
7150 | mleak_top_trace[k-1]->allocs) | |
7151 | break; | |
7152 | ||
7153 | swap = mleak_top_trace[k-1]; | |
7154 | mleak_top_trace[k-1] = mleak_top_trace[k]; | |
7155 | mleak_top_trace[k] = swap; | |
7156 | } | |
7157 | } | |
7158 | } | |
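/*
 * Illustrative sketch (not part of the original source): the scheme
 * above is a bounded insertion sort -- keep the N largest entries,
 * bubbling each new candidate toward the front.  On an int array
 * sorted in descending order:
 */
#if 0	/* example only */
static void
ex_top_n_insert(int *top, int n, int candidate)
{
	int k, tmp;

	if (candidate <= top[n - 1])
		return;			/* not among the N largest */
	top[n - 1] = candidate;
	for (k = n - 1; k > 0 && top[k] > top[k - 1]; k--) {
		tmp = top[k - 1];
		top[k - 1] = top[k];
		top[k] = tmp;
	}
}
#endif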
7159 | ||
7160 | static void | |
7161 | mleak_update_stats(void) | |
7162 | { | |
7163 | mleak_trace_stat_t *mltr; | |
7164 | int i; | |
7165 | ||
7166 | VERIFY(mleak_stat != NULL); | |
7167 | #ifdef __LP64__ | |
7168 | VERIFY(mleak_stat->ml_isaddr64); | |
7169 | #else | |
7170 | VERIFY(!mleak_stat->ml_isaddr64); | |
7171 | #endif /* !__LP64__ */ | |
7172 | VERIFY(mleak_stat->ml_cnt == MLEAK_NUM_TRACES); | |
7173 | ||
7174 | mleak_sort_traces(); | |
7175 | ||
7176 | mltr = &mleak_stat->ml_trace[0]; | |
7177 | bzero(mltr, sizeof (*mltr) * MLEAK_NUM_TRACES); | |
7178 | for (i = 0; i < MLEAK_NUM_TRACES; i++) { | |
7179 | int j; | |
7180 | ||
7181 | if (mleak_top_trace[i] == NULL || | |
7182 | mleak_top_trace[i]->allocs == 0) | |
7183 | continue; | |
7184 | ||
7185 | mltr->mltr_collisions = mleak_top_trace[i]->collisions; | |
7186 | mltr->mltr_hitcount = mleak_top_trace[i]->hitcount; | |
7187 | mltr->mltr_allocs = mleak_top_trace[i]->allocs; | |
7188 | mltr->mltr_depth = mleak_top_trace[i]->depth; | |
7189 | ||
7190 | VERIFY(mltr->mltr_depth <= MLEAK_STACK_DEPTH); | |
7191 | for (j = 0; j < mltr->mltr_depth; j++) | |
7192 | mltr->mltr_addr[j] = mleak_top_trace[i]->addr[j]; | |
7193 | ||
7194 | mltr++; | |
7195 | } | |
7196 | } | |
7197 | ||
6d2010ae A |
7198 | static struct mbtypes { |
7199 | int mt_type; | |
7200 | const char *mt_name; | |
7201 | } mbtypes[] = { | |
7202 | { MT_DATA, "data" }, | |
7203 | { MT_OOBDATA, "oob data" }, | |
7204 | { MT_CONTROL, "ancillary data" }, | |
7205 | { MT_HEADER, "packet headers" }, | |
7206 | { MT_SOCKET, "socket structures" }, | |
7207 | { MT_PCB, "protocol control blocks" }, | |
7208 | { MT_RTABLE, "routing table entries" }, | |
7209 | { MT_HTABLE, "IMP host table entries" }, | |
7210 | { MT_ATABLE, "address resolution tables" }, | |
7211 | { MT_FTABLE, "fragment reassembly queue headers" }, | |
7212 | { MT_SONAME, "socket names and addresses" }, | |
7213 | { MT_SOOPTS, "socket options" }, | |
7214 | { MT_RIGHTS, "access rights" }, | |
7215 | { MT_IFADDR, "interface addresses" }, | |
7216 | { MT_TAG, "packet tags" }, | |
7217 | { 0, NULL } | |
7218 | }; | |
7219 | ||
7220 | #define MBUF_DUMP_BUF_CHK() { \ | |
7221 | clen -= k; \ | |
7222 | if (clen < 1) \ | |
7223 | goto done; \ | |
7224 | c += k; \ | |
7225 | } | |
7226 | ||
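/*
 * Illustrative sketch (not part of the original source): the pattern
 * MBUF_DUMP_BUF_CHK() supports -- repeated snprintf() into one fixed
 * buffer, advancing a cursor and bailing out once space runs out:
 */
#if 0	/* example only */
static void
ex_incremental_print(char *buf, int buflen)
{
	char *c = buf;
	int clen = buflen, k, i;

	for (i = 0; i < 3; i++) {
		k = snprintf(c, clen, "line %d\n", i);
		clen -= k;		/* what MBUF_DUMP_BUF_CHK() does */
		if (clen < 1)
			return;		/* buffer exhausted */
		c += k;
	}
}
#endif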
7227 | static char * | |
7228 | mbuf_dump(void) | |
7229 | { | |
7230 | unsigned long totmem = 0, totfree = 0, totmbufs, totused, totpct; | |
7231 | u_int32_t m_mbufs = 0, m_clfree = 0, m_bigclfree = 0; | |
7232 | u_int32_t m_mbufclfree = 0, m_mbufbigclfree = 0; | |
7233 | u_int32_t m_16kclusters = 0, m_16kclfree = 0, m_mbuf16kclfree = 0; | |
7234 | int nmbtypes = sizeof (mbstat.m_mtypes) / sizeof (short); | |
7235 | uint8_t seen[256]; | |
7236 | struct mbtypes *mp; | |
7237 | mb_class_stat_t *sp; | |
316670eb | 7238 | mleak_trace_stat_t *mltr; |
6d2010ae | 7239 | char *c = mbuf_dump_buf; |
316670eb | 7240 | int i, k, clen = MBUF_DUMP_BUF_SIZE; |
6d2010ae A |
7241 | |
7242 | mbuf_dump_buf[0] = '\0'; | |
7243 | ||
7244 | /* synchronize all statistics in the mbuf table */ | |
7245 | mbuf_stat_sync(); | |
7246 | mbuf_mtypes_sync(TRUE); | |
7247 | ||
7248 | sp = &mb_stat->mbs_class[0]; | |
7249 | for (i = 0; i < mb_stat->mbs_cnt; i++, sp++) { | |
7250 | u_int32_t mem; | |
7251 | ||
7252 | if (m_class(i) == MC_MBUF) { | |
7253 | m_mbufs = sp->mbcl_active; | |
7254 | } else if (m_class(i) == MC_CL) { | |
7255 | m_clfree = sp->mbcl_total - sp->mbcl_active; | |
7256 | } else if (m_class(i) == MC_BIGCL) { | |
7257 | m_bigclfree = sp->mbcl_total - sp->mbcl_active; | |
7258 | } else if (njcl > 0 && m_class(i) == MC_16KCL) { | |
7259 | m_16kclfree = sp->mbcl_total - sp->mbcl_active; | |
7260 | m_16kclusters = sp->mbcl_total; | |
7261 | } else if (m_class(i) == MC_MBUF_CL) { | |
7262 | m_mbufclfree = sp->mbcl_total - sp->mbcl_active; | |
7263 | } else if (m_class(i) == MC_MBUF_BIGCL) { | |
7264 | m_mbufbigclfree = sp->mbcl_total - sp->mbcl_active; | |
7265 | } else if (njcl > 0 && m_class(i) == MC_MBUF_16KCL) { | |
7266 | m_mbuf16kclfree = sp->mbcl_total - sp->mbcl_active; | |
7267 | } | |
7268 | ||
7269 | mem = sp->mbcl_ctotal * sp->mbcl_size; | |
7270 | totmem += mem; | |
7271 | totfree += (sp->mbcl_mc_cached + sp->mbcl_infree) * | |
7272 | sp->mbcl_size; | |
7273 | ||
7274 | } | |
7275 | ||
7276 | /* adjust free counts to include composite caches */ | |
7277 | m_clfree += m_mbufclfree; | |
7278 | m_bigclfree += m_mbufbigclfree; | |
7279 | m_16kclfree += m_mbuf16kclfree; | |
7280 | ||
7281 | totmbufs = 0; | |
7282 | for (mp = mbtypes; mp->mt_name != NULL; mp++) | |
7283 | totmbufs += mbstat.m_mtypes[mp->mt_type]; | |
7284 | if (totmbufs > m_mbufs) | |
7285 | totmbufs = m_mbufs; | |
7286 | k = snprintf(c, clen, "%lu/%u mbufs in use:\n", totmbufs, m_mbufs); | |
7287 | MBUF_DUMP_BUF_CHK(); | |
7288 | ||
7289 | bzero(&seen, sizeof (seen)); | |
7290 | for (mp = mbtypes; mp->mt_name != NULL; mp++) { | |
7291 | if (mbstat.m_mtypes[mp->mt_type] != 0) { | |
7292 | seen[mp->mt_type] = 1; | |
7293 | k = snprintf(c, clen, "\t%u mbufs allocated to %s\n", | |
7294 | mbstat.m_mtypes[mp->mt_type], mp->mt_name); | |
7295 | MBUF_DUMP_BUF_CHK(); | |
7296 | } | |
7297 | } | |
7298 | seen[MT_FREE] = 1; | |
7299 | for (i = 0; i < nmbtypes; i++) | |
7300 | if (!seen[i] && mbstat.m_mtypes[i] != 0) { | |
7301 | k = snprintf(c, clen, "\t%u mbufs allocated to " | |
7302 | "<mbuf type %d>\n", mbstat.m_mtypes[i], i); | |
7303 | MBUF_DUMP_BUF_CHK(); | |
7304 | } | |
7305 | if ((m_mbufs - totmbufs) > 0) { | |
7306 | k = snprintf(c, clen, "\t%lu mbufs allocated to caches\n", | |
7307 | m_mbufs - totmbufs); | |
7308 | MBUF_DUMP_BUF_CHK(); | |
7309 | } | |
7310 | k = snprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n" | |
7311 | "%u/%u mbuf 4KB clusters in use\n", | |
7312 | (unsigned int)(mbstat.m_clusters - m_clfree), | |
7313 | (unsigned int)mbstat.m_clusters, | |
7314 | (unsigned int)(mbstat.m_bigclusters - m_bigclfree), | |
7315 | (unsigned int)mbstat.m_bigclusters); | |
7316 | MBUF_DUMP_BUF_CHK(); | |
7317 | ||
7318 | if (njcl > 0) { | |
7319 | k = snprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n", | |
7320 | m_16kclusters - m_16kclfree, m_16kclusters, | |
7321 | njclbytes / 1024); | |
7322 | MBUF_DUMP_BUF_CHK(); | |
7323 | } | |
7324 | totused = totmem - totfree; | |
7325 | if (totmem == 0) { | |
7326 | totpct = 0; | |
7327 | } else if (totused < (ULONG_MAX / 100)) { | |
7328 | totpct = (totused * 100) / totmem; | |
7329 | } else { | |
7330 | u_long totmem1 = totmem / 100; | |
7331 | u_long totused1 = totused / 100; | |
7332 | totpct = (totused1 * 100) / totmem1; | |
7333 | } | |
7334 | k = snprintf(c, clen, "%lu KB allocated to network (approx. %lu%% " | |
7335 | "in use)\n", totmem / 1024, totpct); | |
7336 | MBUF_DUMP_BUF_CHK(); | |
7337 | ||
316670eb A |
7338 | /* mbuf leak detection statistics */ |
7339 | mleak_update_stats(); | |
7340 | ||
7341 | k = snprintf(c, clen, "\nmbuf leak detection table:\n"); | |
7342 | MBUF_DUMP_BUF_CHK(); | |
7343 | k = snprintf(c, clen, "\ttotal captured: %u (one per %u)\n", | |
7344 | mleak_table.mleak_capture / mleak_table.mleak_sample_factor, | |
7345 | mleak_table.mleak_sample_factor); | |
7346 | MBUF_DUMP_BUF_CHK(); | |
7347 | k = snprintf(c, clen, "\ttotal allocs outstanding: %llu\n", | |
7348 | mleak_table.outstanding_allocs); | |
7349 | MBUF_DUMP_BUF_CHK(); | |
7350 | k = snprintf(c, clen, "\tnew hash recorded: %llu allocs, %llu traces\n", | |
7351 | mleak_table.alloc_recorded, mleak_table.trace_recorded); | |
7352 | MBUF_DUMP_BUF_CHK(); | |
7353 | k = snprintf(c, clen, "\thash collisions: %llu allocs, %llu traces\n", | |
7354 | mleak_table.alloc_collisions, mleak_table.trace_collisions); | |
7355 | MBUF_DUMP_BUF_CHK(); | |
7356 | k = snprintf(c, clen, "\toverwrites: %llu allocs, %llu traces\n", | |
7357 | mleak_table.alloc_overwrites, mleak_table.trace_overwrites); | |
7358 | MBUF_DUMP_BUF_CHK(); | |
7359 | k = snprintf(c, clen, "\tlock conflicts: %llu\n\n", | |
7360 | mleak_table.total_conflicts); | |
7361 | MBUF_DUMP_BUF_CHK(); | |
7362 | ||
7363 | k = snprintf(c, clen, "top %d outstanding traces:\n", | |
7364 | mleak_stat->ml_cnt); | |
7365 | MBUF_DUMP_BUF_CHK(); | |
7366 | for (i = 0; i < mleak_stat->ml_cnt; i++) { | |
7367 | mltr = &mleak_stat->ml_trace[i]; | |
7368 | k = snprintf(c, clen, "[%d] %llu outstanding alloc(s), " | |
7369 | "%llu hit(s), %llu collision(s)\n", (i + 1), | |
7370 | mltr->mltr_allocs, mltr->mltr_hitcount, | |
7371 | mltr->mltr_collisions); | |
7372 | MBUF_DUMP_BUF_CHK(); | |
7373 | } | |
7374 | ||
7375 | if (mleak_stat->ml_isaddr64) | |
7376 | k = snprintf(c, clen, MB_LEAK_HDR_64); | |
7377 | else | |
7378 | k = snprintf(c, clen, MB_LEAK_HDR_32); | |
7379 | MBUF_DUMP_BUF_CHK(); | |
7380 | ||
7381 | for (i = 0; i < MLEAK_STACK_DEPTH; i++) { | |
7382 | int j; | |
7383 | k = snprintf(c, clen, "%2d: ", (i + 1)); | |
7384 | MBUF_DUMP_BUF_CHK(); | |
7385 | for (j = 0; j < mleak_stat->ml_cnt; j++) { | |
7386 | mltr = &mleak_stat->ml_trace[j]; | |
7387 | if (i < mltr->mltr_depth) { | |
7388 | if (mleak_stat->ml_isaddr64) { | |
7389 | k = snprintf(c, clen, "0x%0llx ", | |
fe8ab488 A |
7390 | (uint64_t)VM_KERNEL_UNSLIDE( |
7391 | mltr->mltr_addr[i])); | |
316670eb A |
7392 | } else { |
7393 | k = snprintf(c, clen, | |
7394 | "0x%08x ", | |
fe8ab488 A |
7395 | (uint32_t)VM_KERNEL_UNSLIDE( |
7396 | mltr->mltr_addr[i])); | |
316670eb A |
7397 | } |
7398 | } else { | |
7399 | if (mleak_stat->ml_isaddr64) | |
7400 | k = snprintf(c, clen, | |
7401 | MB_LEAK_SPACING_64); | |
7402 | else | |
7403 | k = snprintf(c, clen, | |
7404 | MB_LEAK_SPACING_32); | |
7405 | } | |
7406 | MBUF_DUMP_BUF_CHK(); | |
7407 | } | |
7408 | k = snprintf(c, clen, "\n"); | |
7409 | MBUF_DUMP_BUF_CHK(); | |
7410 | } | |
6d2010ae A |
7411 | done: |
7412 | return (mbuf_dump_buf); | |
7413 | } | |
7414 | ||
7415 | #undef MBUF_DUMP_BUF_CHK | |
7416 | ||
/*
 * Convert between a regular and a packet header mbuf.  The caller selects
 * the direction via the hdr argument; this routine does the rest of the
 * work, including setting or clearing M_PKTHDR.
 */
int
m_reinit(struct mbuf *m, int hdr)
{
	int ret = 0;

	if (hdr) {
		VERIFY(!(m->m_flags & M_PKTHDR));
		if (!(m->m_flags & M_EXT) &&
		    (m->m_data != m->m_dat || m->m_len > 0)) {
			/*
			 * If there's no external cluster attached and the
			 * mbuf appears to contain user data, we cannot
			 * safely convert this to a packet header mbuf,
			 * as the packet header structure might overlap
			 * with the data.
			 */
			printf("%s: cannot set M_PKTHDR on altered mbuf %llx, "
			    "m_data %llx (expected %llx), "
			    "m_len %d (expected 0)\n",
			    __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m),
			    (uint64_t)VM_KERNEL_ADDRPERM(m->m_data),
			    (uint64_t)VM_KERNEL_ADDRPERM(m->m_dat), m->m_len);
			ret = EBUSY;
		} else {
			VERIFY((m->m_flags & M_EXT) || m->m_data == m->m_dat);
			m->m_flags |= M_PKTHDR;
			MBUF_INIT_PKTHDR(m);
		}
	} else {
		/* Check for scratch area overflow */
		m_redzone_verify(m);
		/* Free the aux data and tags, if any */
		m_tag_delete_chain(m, NULL);
		m->m_flags &= ~M_PKTHDR;
	}

	return (ret);
}

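/*
 * A minimal usage sketch (example_promote() is hypothetical and not part
 * of this file): promoting a plain mbuf to a packet header mbuf is only
 * legal while the mbuf carries no data, which is exactly the condition
 * m_reinit() enforces above.
 */
static int
example_promote(struct mbuf *m)
{
	int err;

	/* m must not already have M_PKTHDR set (VERIFYed above) */
	err = m_reinit(m, 1);		/* non-zero hdr: become pkthdr */
	if (err == EBUSY)
		printf("%s: mbuf already carries data\n", __func__);
	return (err);
}
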
/*
 * Atomically update the module-private property of an external mbuf:
 * set MEXT_PRIV() to n only if it currently equals o.
 */
int
m_ext_set_prop(struct mbuf *m, uint32_t o, uint32_t n)
{
	ASSERT(m->m_flags & M_EXT);
	return (atomic_test_set_32(&MEXT_PRIV(m), o, n));
}

/* Read the module-private property of an external mbuf. */
uint32_t
m_ext_get_prop(struct mbuf *m)
{
	ASSERT(m->m_flags & M_EXT);
	return (MEXT_PRIV(m));
}

/*
 * A paired mbuf is active while it holds more than the minimum (paired)
 * reference; non-paired mbufs are always considered active.
 */
int
m_ext_paired_is_active(struct mbuf *m)
{
	return (MBUF_IS_PAIRED(m) ? (MEXT_PREF(m) > MEXT_MINREF(m)) : 1);
}

void
m_ext_paired_activate(struct mbuf *m)
{
	struct ext_ref *rfa;
	int hdr, type;
	caddr_t extbuf;
	void *extfree;
	u_int extsize;

	VERIFY(MBUF_IS_PAIRED(m));
	VERIFY(MEXT_REF(m) == MEXT_MINREF(m));
	VERIFY(MEXT_PREF(m) == MEXT_MINREF(m));

	hdr = (m->m_flags & M_PKTHDR);
	type = m->m_type;
	extbuf = m->m_ext.ext_buf;
	extfree = m->m_ext.ext_free;
	extsize = m->m_ext.ext_size;
	rfa = MEXT_RFA(m);

	VERIFY(extbuf != NULL && rfa != NULL);

	/*
	 * It is safe to reinitialize the packet header tags here, since
	 * that was already taken care of at m_free() time; this mirrors
	 * what m_clattach() does for the cluster.  Bump MEXT_PREF up to
	 * indicate activation.
	 */
	MBUF_INIT(m, hdr, type);
	MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
	    1, 1, 2, EXTF_PAIRED, MEXT_PRIV(m), m);
}

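/*
 * Lifecycle sketch, as inferred from the checks above: a paired mbuf
 * idles with MEXT_REF == MEXT_PREF == MEXT_MINREF, so
 * m_ext_paired_is_active() reports it inactive.  Activation
 * reinitializes it with MINREF = REF = 1 and PREF = 2; that extra
 * paired reference keeps m_ext_paired_is_active() true until the
 * reference is dropped again.
 */
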
void
m_scratch_init(struct mbuf *m)
{
	struct pkthdr *pkt = &m->m_pkthdr;

	VERIFY(m->m_flags & M_PKTHDR);

	/* See comments in <rdar://problem/14040693> */
	if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
		panic_plain("Invalid attempt to modify guarded module-private "
		    "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
		/* NOTREACHED */
	}

	bzero(&pkt->pkt_mpriv, sizeof (pkt->pkt_mpriv));
}

/*
 * This routine is reserved for mbuf_get_driver_scratch(); clients inside
 * xnu that intend to use the module-private area should refer directly
 * to the pkt_mpriv structure in the pkthdr.  Such clients are also
 * expected to set PKTF_PRIV_GUARDED while they own the packet, and to
 * clear it before handing the packet off to another module.
 */
u_int32_t
m_scratch_get(struct mbuf *m, u_int8_t **p)
{
	struct pkthdr *pkt = &m->m_pkthdr;

	VERIFY(m->m_flags & M_PKTHDR);

	/* See comments in <rdar://problem/14040693> */
	if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
		panic_plain("Invalid attempt to access guarded module-private "
		    "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
		/* NOTREACHED */
	}

	if (mcltrace) {
		mcache_audit_t *mca;

		lck_mtx_lock(mbuf_mlock);
		mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
		if (mca->mca_uflags & MB_SCVALID)
			mcl_audit_scratch(mca);
		lck_mtx_unlock(mbuf_mlock);
	}

	*p = (u_int8_t *)&pkt->pkt_mpriv;
	return (sizeof (pkt->pkt_mpriv));
}

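/*
 * A minimal sketch of the in-xnu usage pattern described above
 * (example_module_tag_t and example_use_mpriv() are hypothetical,
 * for illustration only):
 */
typedef struct {
	uint32_t	state;		/* hypothetical per-module state */
} example_module_tag_t;

static void
example_use_mpriv(struct mbuf *m)
{
	struct pkthdr *pkt = &m->m_pkthdr;

	/* Claim the module-private area while we own the packet. */
	pkt->pkt_flags |= PKTF_PRIV_GUARDED;
	((example_module_tag_t *)(void *)&pkt->pkt_mpriv)->state = 1;

	/* ... the packet stays within this module ... */

	/* Drop the guard before handing the packet to another module. */
	pkt->pkt_flags &= ~PKTF_PRIV_GUARDED;
}
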
static void
m_redzone_init(struct mbuf *m)
{
	VERIFY(m->m_flags & M_PKTHDR);
	/*
	 * Each mbuf has a unique red zone pattern, which is an XOR
	 * of the red zone cookie and the address of the mbuf.
	 */
	m->m_pkthdr.redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
}

static void
m_redzone_verify(struct mbuf *m)
{
	u_int32_t mb_redzone;

	VERIFY(m->m_flags & M_PKTHDR);

	mb_redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
	if (m->m_pkthdr.redzone != mb_redzone) {
		panic("mbuf %p redzone violation with value 0x%x "
		    "(instead of 0x%x, using cookie 0x%x)\n",
		    m, m->m_pkthdr.redzone, mb_redzone, mb_redzone_cookie);
		/* NOTREACHED */
	}
}

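/*
 * A worked example with hypothetical values: for an mbuf at
 * 0xffffff8012345600 and a boot-time mb_redzone_cookie of 0x5a5a5a5a,
 * the stored pattern is
 *
 *	(u_int32_t)0xffffff8012345600 = 0x12345600
 *	0x12345600 ^ 0x5a5a5a5a       = 0x486e0c5a
 *
 * Because the pattern depends on the mbuf's own address, a pkthdr
 * copied or overwritten wholesale into another mbuf fails verification
 * there even if the redzone field itself was preserved.
 */
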
/*
 * Send a report of mbuf usage if the usage exceeds the last recorded
 * peak, is at least 1/16 (~6%) of the max limit, and has grown by at
 * least 1/32 (~3%) of that peak since the last report.
 *
 * The thresholds 1/16 and 1/32 are chosen so that the checks reduce
 * to simple shift operations.
 */
static boolean_t
mbuf_report_usage(mbuf_class_t cl)
{
	/* if a report is already in progress, nothing to do */
	if (mb_peak_newreport)
		return (TRUE);

	if (m_total(cl) > m_peak(cl) &&
	    m_total(cl) >= (m_maxlimit(cl) >> 4) &&
	    (m_total(cl) - m_peak(cl)) >= (m_peak(cl) >> 5))
		return (TRUE);
	return (FALSE);
}

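/*
 * Concrete numbers (hypothetical class limits): with m_maxlimit(cl) of
 * 65536 and a previous m_peak(cl) of 10000, a report fires once
 * m_total(cl) exceeds the old peak, is at least 65536 >> 4 = 4096
 * (~6.25% of the limit), and has grown by at least 10000 >> 5 = 312
 * (~3.1% of the old peak) -- i.e. at m_total(cl) >= 10312 here.
 */
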
__private_extern__ void
mbuf_report_peak_usage(void)
{
	int i = 0;
	u_int64_t uptime;
	struct nstat_sysinfo_data ns_data;
	uint32_t memreleased = 0;

	uptime = net_uptime();
	lck_mtx_lock(mbuf_mlock);

	/* Generate an initial report after 1 week of uptime */
	if (!mb_peak_firstreport &&
	    uptime > MBUF_PEAK_FIRST_REPORT_THRESHOLD) {
		mb_peak_newreport = TRUE;
		mb_peak_firstreport = TRUE;
	}

	if (!mb_peak_newreport) {
		lck_mtx_unlock(mbuf_mlock);
		return;
	}

	/*
	 * If a report is being generated before the 1-week mark,
	 * there is no need to force the initial report later.
	 */
	if (uptime < MBUF_PEAK_FIRST_REPORT_THRESHOLD)
		mb_peak_firstreport = TRUE;

	for (i = 0; i < NELEM(mbuf_table); i++) {
		m_peak(m_class(i)) = m_total(m_class(i));
		memreleased += m_release_cnt(i);
		m_release_cnt(i) = 0;
	}
	mb_peak_newreport = FALSE;
	lck_mtx_unlock(mbuf_mlock);

	bzero(&ns_data, sizeof(ns_data));
	ns_data.flags = NSTAT_SYSINFO_MBUF_STATS;
	ns_data.u.mb_stats.total_256b = m_peak(MC_MBUF);
	ns_data.u.mb_stats.total_2kb = m_peak(MC_CL);
	ns_data.u.mb_stats.total_4kb = m_peak(MC_BIGCL);
	ns_data.u.mb_stats.total_16kb = m_peak(MC_16KCL);
	ns_data.u.mb_stats.sbmb_total = total_sbmb_cnt_peak;
	ns_data.u.mb_stats.sb_atmbuflimit = sbmb_limreached;
	ns_data.u.mb_stats.draincnt = mbstat.m_drain;
	ns_data.u.mb_stats.memreleased = memreleased;
	ns_data.u.mb_stats.sbmb_floor = total_sbmb_cnt_floor;

	nstat_sysinfo_send_data(&ns_data);

	/*
	 * Reset the floor whenever we report a new peak, so the trend
	 * can be tracked: an increase in peak usage is not a leak if
	 * mbufs get released between reports and the floor stays low.
	 */
	total_sbmb_cnt_floor = total_sbmb_cnt_peak;
}

/*
 * Called by the VM when there's memory pressure.
 */
__private_extern__ void
m_drain(void)
{
	mbuf_class_t mc;
	mcl_slab_t *sp, *sp_tmp, *nsp;
	unsigned int num, k, interval, released = 0;
	unsigned long total_mem = 0, use_mem = 0;
	boolean_t ret, purge_caches = FALSE;
	ppnum_t offset;
	mcache_obj_t *obj;
	unsigned long per;
	static uint64_t last_drain = 0;
	static unsigned char scratch[32];
	static ppnum_t scratch_pa = 0;

	if (mb_drain_maxint == 0 || mb_waiters)
		return;
	if (scratch_pa == 0) {
		bzero(scratch, sizeof(scratch));
		scratch_pa = pmap_find_phys(kernel_pmap, (addr64_t)scratch);
		VERIFY(scratch_pa);
	} else if (mclverify) {
		/*
		 * Panic if a driver wrote to our scratch memory.
		 */
		for (k = 0; k < sizeof(scratch); k++)
			if (scratch[k])
				panic("suspect DMA to freed address");
	}
	/*
	 * Don't free memory too often, as that could cause excessive
	 * waiting times for mbufs.  Purge the caches only if the last
	 * drain was within five drain intervals (5 * mb_drain_maxint
	 * seconds) of this one.
	 */
	lck_mtx_lock(mbuf_mlock);
	if (last_drain == 0) {
		last_drain = net_uptime();
		lck_mtx_unlock(mbuf_mlock);
		return;
	}
	interval = net_uptime() - last_drain;
	if (interval <= mb_drain_maxint) {
		lck_mtx_unlock(mbuf_mlock);
		return;
	}
	if (interval <= mb_drain_maxint * 5)
		purge_caches = TRUE;
	last_drain = net_uptime();
	/*
	 * Don't free any memory if we're using 60% or more.
	 */
	for (mc = 0; mc < NELEM(mbuf_table); mc++) {
		total_mem += m_total(mc) * m_maxsize(mc);
		use_mem += m_active(mc) * m_maxsize(mc);
	}
	per = (use_mem * 100) / total_mem;
	if (per >= 60) {
		lck_mtx_unlock(mbuf_mlock);
		return;
	}
	/*
	 * Purge all the caches.  This effectively disables
	 * caching for a few seconds, but the mbuf worker thread will
	 * re-enable them again.
	 */
	if (purge_caches == TRUE)
		for (mc = 0; mc < NELEM(mbuf_table); mc++) {
			if (m_total(mc) < m_avgtotal(mc))
				continue;
			lck_mtx_unlock(mbuf_mlock);
			ret = mcache_purge_cache(m_cache(mc), FALSE);
			lck_mtx_lock(mbuf_mlock);
			if (ret == TRUE)
				m_purge_cnt(mc)++;
		}
	/*
	 * Move the objects from the composite class freelist to
	 * the rudimentary slabs list, but keep at least 10% of the average
	 * total in the freelist.
	 */
	for (mc = 0; mc < NELEM(mbuf_table); mc++) {
		while (m_cobjlist(mc) &&
		    m_total(mc) < m_avgtotal(mc) &&
		    m_infree(mc) > 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
			obj = m_cobjlist(mc);
			m_cobjlist(mc) = obj->obj_next;
			obj->obj_next = NULL;
			num = cslab_free(mc, obj, 1);
			VERIFY(num == 1);
			m_free_cnt(mc)++;
			m_infree(mc)--;
			/* cslab_free() handles m_total */
		}
	}
	/*
	 * Free the buffers present in the slab list up to 10% of the total
	 * average per class.
	 *
	 * We walk the list backwards in an attempt to reduce fragmentation.
	 */
	for (mc = NELEM(mbuf_table) - 1; (int)mc >= 0; mc--) {
		TAILQ_FOREACH_SAFE(sp, &m_slablist(mc), sl_link, sp_tmp) {
			/*
			 * Process only unused slabs occupying memory.
			 */
			if (sp->sl_refcnt != 0 || sp->sl_len == 0 ||
			    sp->sl_base == NULL)
				continue;
			if (m_total(mc) < m_avgtotal(mc) ||
			    m_infree(mc) < 0.1 * m_avgtotal(mc) + m_minlimit(mc))
				break;
			slab_remove(sp, mc);
			switch (mc) {
			case MC_MBUF:
				m_infree(mc) -= NMBPG;
				m_total(mc) -= NMBPG;
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, NMBPG);
				break;
			case MC_CL:
				m_infree(mc) -= NCLPG;
				m_total(mc) -= NCLPG;
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, NMBPG);
				break;
			case MC_BIGCL:
			{
				m_infree(mc) -= NBCLPG;
				m_total(mc) -= NBCLPG;
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, NMBPG);
				break;
			}
			case MC_16KCL:
				m_infree(mc)--;
				m_total(mc)--;
				for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
					nsp = nsp->sl_next;
					VERIFY(nsp->sl_refcnt == 0 &&
					    nsp->sl_base != NULL &&
					    nsp->sl_len == 0);
					slab_init(nsp, 0, 0, NULL, NULL, 0, 0,
					    0);
					nsp->sl_flags = 0;
				}
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, 1);
				break;
			default:
				/*
				 * The composite classes have their own
				 * freelist (m_cobjlist), so we only
				 * process rudimentary classes here.
				 */
				VERIFY(0);
			}
			m_release_cnt(mc) += m_size(mc);
			released += m_size(mc);
			VERIFY(sp->sl_base != NULL &&
			    sp->sl_len >= PAGE_SIZE);
			offset = MTOPG(sp->sl_base);
			/*
			 * Make sure the IOMapper points to a valid, but
			 * bogus, address.  This should prevent further DMA
			 * accesses to freed memory.
			 */
			IOMapperInsertPage(mcl_paddr_base, offset, scratch_pa);
			mcl_paddr[offset] = 0;
			kmem_free(mb_map, (vm_offset_t)sp->sl_base,
			    sp->sl_len);
			slab_init(sp, 0, 0, NULL, NULL, 0, 0, 0);
			sp->sl_flags = 0;
		}
	}
	mbstat.m_drain++;
	mbstat.m_bigclusters = m_total(MC_BIGCL);
	mbstat.m_clusters = m_total(MC_CL);
	mbstat.m_mbufs = m_total(MC_MBUF);
	mbuf_stat_sync();
	mbuf_mtypes_sync(TRUE);
	lck_mtx_unlock(mbuf_mlock);
}

static int
m_drain_force_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int val = 0, err;

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL)
		return (err);
	if (val)
		m_drain();

	return (err);
}

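/*
 * Usage note (illustrative): with the declarations below, the collector
 * can be forced from userland via
 *
 *	sysctl -w kern.ipc.mb_drain_force=1
 *
 * and the minimum interval between automatic runs tuned through
 * kern.ipc.mb_drain_maxint (0 disables m_drain() entirely, per the
 * check at the top of that routine).
 */
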
SYSCTL_DECL(_kern_ipc);
SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mbstat_sysctl, "S,mbstat", "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_stat,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mb_stat_sysctl, "S,mb_stat", "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_top_trace,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mleak_top_trace_sysctl, "S,mb_top_trace", "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_table,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mleak_table_sysctl, "S,mleak_table", "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mleak_sample_factor,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mleak_table.mleak_sample_factor, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_normalized,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mb_normalized, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_watchdog,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mb_watchdog, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_drain_force,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    m_drain_force_sysctl, "I",
    "Forces the mbuf garbage collection to run");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_drain_maxint,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mb_drain_maxint, 0,
    "Minimum time interval between garbage collection runs");