2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1988, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/malloc.h>
74 #include <sys/kernel.h>
75 #include <sys/sysctl.h>
76 #include <sys/syslog.h>
77 #include <sys/protosw.h>
78 #include <sys/domain.h>
79 #include <sys/queue.h>
82 #include <dev/random/randomdev.h>
84 #include <kern/kern_types.h>
85 #include <kern/simple_lock.h>
86 #include <kern/queue.h>
87 #include <kern/sched_prim.h>
88 #include <kern/backtrace.h>
89 #include <kern/cpu_number.h>
90 #include <kern/zalloc.h>
92 #include <libkern/OSAtomic.h>
93 #include <libkern/OSDebug.h>
94 #include <libkern/libkern.h>
96 #include <IOKit/IOMapper.h>
98 #include <machine/limits.h>
99 #include <machine/machine_routines.h>
102 #include <security/mac_framework.h>
105 #include <sys/mcache.h>
106 #include <net/ntstat.h>
109 * MBUF IMPLEMENTATION NOTES.
111 * There is a total of 5 per-CPU caches:
114 * This is a cache of rudimentary objects of MSIZE in size; each
115 * object represents an mbuf structure. This cache preserves only
116 * the m_type field of the mbuf during its transactions.
119 * This is a cache of rudimentary objects of MCLBYTES in size; each
120 * object represents a mcluster structure. This cache does not
121 * preserve the contents of the objects during its transactions.
124 * This is a cache of rudimentary objects of MBIGCLBYTES in size; each
125 * object represents a mbigcluster structure. This cache does not
126 * preserve the contents of the objects during its transaction.
129 * This is a cache of mbufs each having a cluster attached to it.
130 * It is backed by MC_MBUF and MC_CL rudimentary caches. Several
131 * fields of the mbuf related to the external cluster are preserved
132 * during transactions.
135 * This is a cache of mbufs each having a big cluster attached to it.
136 * It is backed by MC_MBUF and MC_BIGCL rudimentary caches. Several
137 * fields of the mbuf related to the external cluster are preserved
138 * during transactions.
142 * Allocation requests are handled first at the per-CPU (mcache) layer
143 * before falling back to the slab layer. Performance is optimal when
144 * the request is satisfied at the CPU layer because global data/lock
145 * never gets accessed. When the slab layer is entered for allocation,
146 * the slab freelist will be checked first for available objects before
147 * the VM backing store is invoked. Slab layer operations are serialized
148 * for all of the caches as the mbuf global lock is held most of the time.
149 * Allocation paths are different depending on the class of objects:
151 * a. Rudimentary object:
153 * { m_get_common(), m_clattach(), m_mclget(),
154 * m_mclalloc(), m_bigalloc(), m_copym_with_hdrs(),
155 * composite object allocation }
158 * | +-----------------------+
160 * mcache_alloc/mcache_alloc_ext() mbuf_slab_audit()
163 * [CPU cache] -------> (found?) -------+
166 * mbuf_slab_alloc() |
169 * +---------> [freelist] -------> (found?) -------+
175 * +---<<---- kmem_mb_alloc()
177 * b. Composite object:
179 * { m_getpackets_internal(), m_allocpacket_internal() }
182 * | +------ (done) ---------+
184 * mcache_alloc/mcache_alloc_ext() mbuf_cslab_audit()
187 * [CPU cache] -------> (found?) -------+
190 * mbuf_cslab_alloc() |
193 * [freelist] -------> (found?) -------+
196 * (rudimentary object) |
197 * mcache_alloc/mcache_alloc_ext() ------>>-----+
199 * Auditing notes: If auditing is enabled, buffers will be subjected to
200 * integrity checks by the audit routine. This is done by verifying their
201 * contents against DEADBEEF (free) pattern before returning them to caller.
202 * As part of this step, the routine will also record the transaction and
203 * pattern-fill the buffers with BADDCAFE (uninitialized) pattern. It will
204 * also restore any constructed data structure fields if necessary.
206 * OBJECT DEALLOCATION:
208 * Freeing an object simply involves placing it into the CPU cache; this
209 * pollutes the cache to benefit subsequent allocations. The slab layer
210 * will only be entered if the object is to be purged out of the cache.
211 * During normal operations, this happens only when the CPU layer resizes
212 * its bucket while it's adjusting to the allocation load. Deallocation
213 * paths are different depending on the class of objects:
215 * a. Rudimentary object:
217 * { m_free(), m_freem_list(), composite object deallocation }
220 * | +------ (done) ---------+
222 * mcache_free/mcache_free_ext() |
225 * mbuf_slab_audit() |
228 * [CPU cache] ---> (not purging?) -----+
234 * [freelist] ----------->>------------+
235 * (objects get purged to VM only on demand)
237 * b. Composite object:
239 * { m_free(), m_freem_list() }
242 * | +------ (done) ---------+
244 * mcache_free/mcache_free_ext() |
247 * mbuf_cslab_audit() |
250 * [CPU cache] ---> (not purging?) -----+
253 * mbuf_cslab_free() |
256 * [freelist] ---> (not purging?) -----+
259 * (rudimentary object) |
260 * mcache_free/mcache_free_ext() ------->>------+
262 * Auditing notes: If auditing is enabled, the audit routine will save
263 * any constructed data structure fields (if necessary) before filling the
264 * contents of the buffers with DEADBEEF (free) pattern and recording the
265 * transaction. Buffers that are freed (whether at CPU or slab layer) are
266 * expected to contain the free pattern.
270 * Debugging can be enabled by adding "mbuf_debug=0x3" to boot-args; this
271 * translates to the mcache flags (MCF_VERIFY | MCF_AUDIT). Additionally,
272 * the CPU layer cache can be disabled by setting the MCF_NOCPUCACHE flag,
273 * i.e. modify the boot argument parameter to "mbuf_debug=0x13". Leak
274 * detection may also be disabled by setting the MCF_NOLEAKLOG flag, e.g.
275 * "mbuf_debug=0x113". Note that debugging consumes more CPU and memory.
277 * Each object is associated with exactly one mcache_audit_t structure that
278 * contains the information related to its last buffer transaction. Given
279 * an address of an object, the audit structure can be retrieved by finding
280 * the position of the object relevant to the base address of the cluster:
282 * +------------+ +=============+
283 * | mbuf addr | | mclaudit[i] |
284 * +------------+ +=============+
286 * i = MTOBG(addr) +-------------+
287 * | +-----> | cl_audit[1] | -----> mcache_audit_t
288 * b = BGTOM(i) | +-------------+
290 * x = MCLIDX(b, addr) | +-------------+
291 * | | | cl_audit[7] |
292 * +-----------------+ +-------------+
295 * The mclaudit[] array is allocated at initialization time, but its contents
296 * get populated when the corresponding cluster is created. Because a page
297 * can be turned into NMBPG number of mbufs, we preserve enough space for the
298 * mbufs so that there is a 1-to-1 mapping between them. A page that never
299 * gets (or has not yet) turned into mbufs will use only cl_audit[0] with the
300 * remaining entries unused. For 16KB cluster, only one entry from the first
301 * page is allocated and used for the entire object.
304 /* TODO: should be in header file */
305 /* kernel translater */
306 extern vm_offset_t
kmem_mb_alloc(vm_map_t
, int, int);
307 extern ppnum_t
pmap_find_phys(pmap_t pmap
, addr64_t va
);
308 extern vm_map_t mb_map
; /* special map */
311 decl_lck_mtx_data(static, mbuf_mlock_data
);
312 static lck_mtx_t
*mbuf_mlock
= &mbuf_mlock_data
;
313 static lck_attr_t
*mbuf_mlock_attr
;
314 static lck_grp_t
*mbuf_mlock_grp
;
315 static lck_grp_attr_t
*mbuf_mlock_grp_attr
;
317 /* Back-end (common) layer */
318 static boolean_t mbuf_worker_needs_wakeup
; /* wait channel for mbuf worker */
319 static int mbuf_worker_ready
; /* worker thread is runnable */
320 static int mbuf_expand_mcl
; /* number of cluster creation requets */
321 static int mbuf_expand_big
; /* number of big cluster creation requests */
322 static int mbuf_expand_16k
; /* number of 16KB cluster creation requests */
323 static int ncpu
; /* number of CPUs */
324 static ppnum_t
*mcl_paddr
; /* Array of cluster physical addresses */
325 static ppnum_t mcl_pages
; /* Size of array (# physical pages) */
326 static ppnum_t mcl_paddr_base
; /* Handle returned by IOMapper::iovmAlloc() */
327 static mcache_t
*ref_cache
; /* Cache of cluster reference & flags */
328 static mcache_t
*mcl_audit_con_cache
; /* Audit contents cache */
329 static unsigned int mbuf_debug
; /* patchable mbuf mcache flags */
330 static unsigned int mb_normalized
; /* number of packets "normalized" */
332 #define MB_GROWTH_AGGRESSIVE 1 /* Threshold: 1/2 of total */
333 #define MB_GROWTH_NORMAL 2 /* Threshold: 3/4 of total */
336 MC_MBUF
= 0, /* Regular mbuf */
338 MC_BIGCL
, /* Large (4KB) cluster */
339 MC_16KCL
, /* Jumbo (16KB) cluster */
340 MC_MBUF_CL
, /* mbuf + cluster */
341 MC_MBUF_BIGCL
, /* mbuf + large (4KB) cluster */
342 MC_MBUF_16KCL
/* mbuf + jumbo (16KB) cluster */
345 #define MBUF_CLASS_MIN MC_MBUF
346 #define MBUF_CLASS_MAX MC_MBUF_16KCL
347 #define MBUF_CLASS_LAST MC_16KCL
348 #define MBUF_CLASS_VALID(c) \
349 ((int)(c) >= MBUF_CLASS_MIN && (int)(c) <= MBUF_CLASS_MAX)
350 #define MBUF_CLASS_COMPOSITE(c) \
351 ((int)(c) > MBUF_CLASS_LAST)
355 * mbuf specific mcache allocation request flags.
357 #define MCR_COMP MCR_USR1 /* for MC_MBUF_{CL,BIGCL,16KCL} caches */
360 * Per-cluster slab structure.
362 * A slab is a cluster control structure that contains one or more object
363 * chunks; the available chunks are chained in the slab's freelist (sl_head).
364 * Each time a chunk is taken out of the slab, the slab's reference count
365 * gets incremented. When all chunks have been taken out, the empty slab
366 * gets removed (SLF_DETACHED) from the class's slab list. A chunk that is
367 * returned to a slab causes the slab's reference count to be decremented;
368 * it also causes the slab to be reinserted back to class's slab list, if
369 * it's not already done.
371 * Compartmentalizing of the object chunks into slabs allows us to easily
372 * merge one or more slabs together when the adjacent slabs are idle, as
373 * well as to convert or move a slab from one class to another; e.g. the
374 * mbuf cluster slab can be converted to a regular cluster slab when all
375 * mbufs in the slab have been freed.
377 * A slab may also span across multiple clusters for chunks larger than
378 * a cluster's size. In this case, only the slab of the first cluster is
379 * used. The rest of the slabs are marked with SLF_PARTIAL to indicate
380 * that they are part of the larger slab.
382 * Each slab controls a page of memory.
384 typedef struct mcl_slab
{
385 struct mcl_slab
*sl_next
; /* neighboring slab */
386 u_int8_t sl_class
; /* controlling mbuf class */
387 int8_t sl_refcnt
; /* outstanding allocations */
388 int8_t sl_chunks
; /* chunks (bufs) in this slab */
389 u_int16_t sl_flags
; /* slab flags (see below) */
390 u_int16_t sl_len
; /* slab length */
391 void *sl_base
; /* base of allocated memory */
392 void *sl_head
; /* first free buffer */
393 TAILQ_ENTRY(mcl_slab
) sl_link
; /* next/prev slab on freelist */
396 #define SLF_MAPPED 0x0001 /* backed by a mapped page */
397 #define SLF_PARTIAL 0x0002 /* part of another slab */
398 #define SLF_DETACHED 0x0004 /* not in slab freelist */
401 * The array of slabs are broken into groups of arrays per 1MB of kernel
402 * memory to reduce the footprint. Each group is allocated on demand
403 * whenever a new piece of memory mapped in from the VM crosses the 1MB
406 #define NSLABSPMB ((1 << MBSHIFT) >> PAGE_SHIFT)
408 typedef struct mcl_slabg
{
409 mcl_slab_t
*slg_slab
; /* group of slabs */
413 * Number of slabs needed to control a 16KB cluster object.
415 #define NSLABSP16KB (M16KCLBYTES >> PAGE_SHIFT)
418 * Per-cluster audit structure.
421 mcache_audit_t
**cl_audit
; /* array of audits */
425 struct thread
*msa_thread
; /* thread doing transaction */
426 struct thread
*msa_pthread
; /* previous transaction thread */
427 uint32_t msa_tstamp
; /* transaction timestamp (ms) */
428 uint32_t msa_ptstamp
; /* prev transaction timestamp (ms) */
429 uint16_t msa_depth
; /* pc stack depth */
430 uint16_t msa_pdepth
; /* previous transaction pc stack */
431 void *msa_stack
[MCACHE_STACK_DEPTH
];
432 void *msa_pstack
[MCACHE_STACK_DEPTH
];
433 } mcl_scratch_audit_t
;
437 * Size of data from the beginning of an mbuf that covers m_hdr,
438 * pkthdr and m_ext structures. If auditing is enabled, we allocate
439 * a shadow mbuf structure of this size inside each audit structure,
440 * and the contents of the real mbuf gets copied into it when the mbuf
441 * is freed. This allows us to pattern-fill the mbuf for integrity
442 * check, and to preserve any constructed mbuf fields (e.g. mbuf +
443 * cluster cache case). Note that we don't save the contents of
444 * clusters when they are freed; we simply pattern-fill them.
446 u_int8_t sc_mbuf
[(MSIZE
- _MHLEN
) + sizeof (_m_ext_t
)];
447 mcl_scratch_audit_t sc_scratch
__attribute__((aligned(8)));
448 } mcl_saved_contents_t
;
450 #define AUDIT_CONTENTS_SIZE (sizeof (mcl_saved_contents_t))
452 #define MCA_SAVED_MBUF_PTR(_mca) \
453 ((struct mbuf *)(void *)((mcl_saved_contents_t *) \
454 (_mca)->mca_contents)->sc_mbuf)
455 #define MCA_SAVED_MBUF_SIZE \
456 (sizeof (((mcl_saved_contents_t *)0)->sc_mbuf))
457 #define MCA_SAVED_SCRATCH_PTR(_mca) \
458 (&((mcl_saved_contents_t *)(_mca)->mca_contents)->sc_scratch)
461 * mbuf specific mcache audit flags
463 #define MB_INUSE 0x01 /* object has not been returned to slab */
464 #define MB_COMP_INUSE 0x02 /* object has not been returned to cslab */
465 #define MB_SCVALID 0x04 /* object has valid saved contents */
468 * Each of the following two arrays hold up to nmbclusters elements.
470 static mcl_audit_t
*mclaudit
; /* array of cluster audit information */
471 static unsigned int maxclaudit
; /* max # of entries in audit table */
472 static mcl_slabg_t
**slabstbl
; /* cluster slabs table */
473 static unsigned int maxslabgrp
; /* max # of entries in slabs table */
474 static unsigned int slabgrp
; /* # of entries in slabs table */
477 int nclusters
; /* # of clusters for non-jumbo (legacy) sizes */
478 int njcl
; /* # of clusters for jumbo sizes */
479 int njclbytes
; /* size of a jumbo cluster */
480 unsigned char *mbutl
; /* first mapped cluster address */
481 unsigned char *embutl
; /* ending virtual address of mclusters */
482 int _max_linkhdr
; /* largest link-level header */
483 int _max_protohdr
; /* largest protocol header */
484 int max_hdr
; /* largest link+protocol header */
485 int max_datalen
; /* MHLEN - max_hdr */
487 static boolean_t mclverify
; /* debug: pattern-checking */
488 static boolean_t mcltrace
; /* debug: stack tracing */
489 static boolean_t mclfindleak
; /* debug: leak detection */
490 static boolean_t mclexpleak
; /* debug: expose leak info to user space */
492 static struct timeval mb_start
; /* beginning of time */
494 /* mbuf leak detection variables */
495 static struct mleak_table mleak_table
;
496 static mleak_stat_t
*mleak_stat
;
498 #define MLEAK_STAT_SIZE(n) \
499 ((size_t)(&((mleak_stat_t *)0)->ml_trace[n]))
502 mcache_obj_t
*element
; /* the alloc'ed element, NULL if unused */
503 u_int32_t trace_index
; /* mtrace index for corresponding backtrace */
504 u_int32_t count
; /* How many objects were requested */
505 u_int64_t hitcount
; /* for determining hash effectiveness */
509 u_int64_t collisions
;
513 uintptr_t addr
[MLEAK_STACK_DEPTH
];
516 /* Size must be a power of two for the zhash to be able to just mask off bits */
517 #define MLEAK_ALLOCATION_MAP_NUM 512
518 #define MLEAK_TRACE_MAP_NUM 256
521 * Sample factor for how often to record a trace. This is overwritable
522 * by the boot-arg mleak_sample_factor.
524 #define MLEAK_SAMPLE_FACTOR 500
527 * Number of top leakers recorded.
529 #define MLEAK_NUM_TRACES 5
531 #define MB_LEAK_SPACING_64 " "
532 #define MB_LEAK_SPACING_32 " "
535 #define MB_LEAK_HDR_32 "\n\
536 trace [1] trace [2] trace [3] trace [4] trace [5] \n\
537 ---------- ---------- ---------- ---------- ---------- \n\
540 #define MB_LEAK_HDR_64 "\n\
541 trace [1] trace [2] trace [3] \
542 trace [4] trace [5] \n\
543 ------------------ ------------------ ------------------ \
544 ------------------ ------------------ \n\
547 static uint32_t mleak_alloc_buckets
= MLEAK_ALLOCATION_MAP_NUM
;
548 static uint32_t mleak_trace_buckets
= MLEAK_TRACE_MAP_NUM
;
550 /* Hashmaps of allocations and their corresponding traces */
551 static struct mallocation
*mleak_allocations
;
552 static struct mtrace
*mleak_traces
;
553 static struct mtrace
*mleak_top_trace
[MLEAK_NUM_TRACES
];
555 /* Lock to protect mleak tables from concurrent modification */
556 decl_lck_mtx_data(static, mleak_lock_data
);
557 static lck_mtx_t
*mleak_lock
= &mleak_lock_data
;
558 static lck_attr_t
*mleak_lock_attr
;
559 static lck_grp_t
*mleak_lock_grp
;
560 static lck_grp_attr_t
*mleak_lock_grp_attr
;
562 /* Lock to protect the completion callback table */
563 static lck_grp_attr_t
*mbuf_tx_compl_tbl_lck_grp_attr
= NULL
;
564 static lck_attr_t
*mbuf_tx_compl_tbl_lck_attr
= NULL
;
565 static lck_grp_t
*mbuf_tx_compl_tbl_lck_grp
= NULL
;
566 decl_lck_rw_data(, mbuf_tx_compl_tbl_lck_rw_data
);
567 lck_rw_t
*mbuf_tx_compl_tbl_lock
= &mbuf_tx_compl_tbl_lck_rw_data
;
569 extern u_int32_t high_sb_max
;
571 /* The minimum number of objects that are allocated, to start. */
573 #define MINBIGCL (MINCL >> 1)
574 #define MIN16KCL (MINCL >> 2)
576 /* Low watermarks (only map in pages once free counts go below) */
577 #define MBIGCL_LOWAT MINBIGCL
578 #define M16KCL_LOWAT MIN16KCL
581 mbuf_class_t mtbl_class
; /* class type */
582 mcache_t
*mtbl_cache
; /* mcache for this buffer class */
583 TAILQ_HEAD(mcl_slhead
, mcl_slab
) mtbl_slablist
; /* slab list */
584 mcache_obj_t
*mtbl_cobjlist
; /* composite objects freelist */
585 mb_class_stat_t
*mtbl_stats
; /* statistics fetchable via sysctl */
586 u_int32_t mtbl_maxsize
; /* maximum buffer size */
587 int mtbl_minlimit
; /* minimum allowed */
588 int mtbl_maxlimit
; /* maximum allowed */
589 u_int32_t mtbl_wantpurge
; /* purge during next reclaim */
590 uint32_t mtbl_avgtotal
; /* average total on iOS */
593 #define m_class(c) mbuf_table[c].mtbl_class
594 #define m_cache(c) mbuf_table[c].mtbl_cache
595 #define m_slablist(c) mbuf_table[c].mtbl_slablist
596 #define m_cobjlist(c) mbuf_table[c].mtbl_cobjlist
597 #define m_maxsize(c) mbuf_table[c].mtbl_maxsize
598 #define m_minlimit(c) mbuf_table[c].mtbl_minlimit
599 #define m_maxlimit(c) mbuf_table[c].mtbl_maxlimit
600 #define m_wantpurge(c) mbuf_table[c].mtbl_wantpurge
601 #define m_avgtotal(c) mbuf_table[c].mtbl_avgtotal
602 #define m_cname(c) mbuf_table[c].mtbl_stats->mbcl_cname
603 #define m_size(c) mbuf_table[c].mtbl_stats->mbcl_size
604 #define m_total(c) mbuf_table[c].mtbl_stats->mbcl_total
605 #define m_active(c) mbuf_table[c].mtbl_stats->mbcl_active
606 #define m_infree(c) mbuf_table[c].mtbl_stats->mbcl_infree
607 #define m_slab_cnt(c) mbuf_table[c].mtbl_stats->mbcl_slab_cnt
608 #define m_alloc_cnt(c) mbuf_table[c].mtbl_stats->mbcl_alloc_cnt
609 #define m_free_cnt(c) mbuf_table[c].mtbl_stats->mbcl_free_cnt
610 #define m_notified(c) mbuf_table[c].mtbl_stats->mbcl_notified
611 #define m_purge_cnt(c) mbuf_table[c].mtbl_stats->mbcl_purge_cnt
612 #define m_fail_cnt(c) mbuf_table[c].mtbl_stats->mbcl_fail_cnt
613 #define m_ctotal(c) mbuf_table[c].mtbl_stats->mbcl_ctotal
614 #define m_peak(c) mbuf_table[c].mtbl_stats->mbcl_peak_reported
615 #define m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt
617 static mbuf_table_t mbuf_table
[] = {
619 * The caches for mbufs, regular clusters and big clusters.
620 * The average total values were based on data gathered by actual
621 * usage patterns on iOS.
623 { MC_MBUF
, NULL
, TAILQ_HEAD_INITIALIZER(m_slablist(MC_MBUF
)),
624 NULL
, NULL
, 0, 0, 0, 0, 3000 },
625 { MC_CL
, NULL
, TAILQ_HEAD_INITIALIZER(m_slablist(MC_CL
)),
626 NULL
, NULL
, 0, 0, 0, 0, 2000 },
627 { MC_BIGCL
, NULL
, TAILQ_HEAD_INITIALIZER(m_slablist(MC_BIGCL
)),
628 NULL
, NULL
, 0, 0, 0, 0, 1000 },
629 { MC_16KCL
, NULL
, TAILQ_HEAD_INITIALIZER(m_slablist(MC_16KCL
)),
630 NULL
, NULL
, 0, 0, 0, 0, 1000 },
632 * The following are special caches; they serve as intermediate
633 * caches backed by the above rudimentary caches. Each object
634 * in the cache is an mbuf with a cluster attached to it. Unlike
635 * the above caches, these intermediate caches do not directly
636 * deal with the slab structures; instead, the constructed
637 * cached elements are simply stored in the freelists.
639 { MC_MBUF_CL
, NULL
, { NULL
, NULL
}, NULL
, NULL
, 0, 0, 0, 0, 2000 },
640 { MC_MBUF_BIGCL
, NULL
, { NULL
, NULL
}, NULL
, NULL
, 0, 0, 0, 0, 1000 },
641 { MC_MBUF_16KCL
, NULL
, { NULL
, NULL
}, NULL
, NULL
, 0, 0, 0, 0, 1000 },
644 #define NELEM(a) (sizeof (a) / sizeof ((a)[0]))
646 static void *mb_waitchan
= &mbuf_table
; /* wait channel for all caches */
647 static int mb_waiters
; /* number of waiters */
649 boolean_t mb_peak_newreport
= FALSE
;
650 boolean_t mb_peak_firstreport
= FALSE
;
652 /* generate a report by default after 1 week of uptime */
653 #define MBUF_PEAK_FIRST_REPORT_THRESHOLD 604800
655 #define MB_WDT_MAXTIME 10 /* # of secs before watchdog panic */
656 static struct timeval mb_wdtstart
; /* watchdog start timestamp */
657 static char *mbuf_dump_buf
;
659 #define MBUF_DUMP_BUF_SIZE 2048
662 * mbuf watchdog is enabled by default on embedded platforms. It is
663 * also toggeable via the kern.ipc.mb_watchdog sysctl.
664 * Garbage collection is also enabled by default on embedded platforms.
665 * mb_drain_maxint controls the amount of time to wait (in seconds) before
666 * consecutive calls to m_drain().
668 static unsigned int mb_watchdog
= 0;
669 static unsigned int mb_drain_maxint
= 0;
671 uintptr_t mb_obscure_extfree
__attribute__((visibility("hidden")));
672 uintptr_t mb_obscure_extref
__attribute__((visibility("hidden")));
675 static u_int32_t mb_redzone_cookie
;
676 static void m_redzone_init(struct mbuf
*);
677 static void m_redzone_verify(struct mbuf
*m
);
679 /* The following are used to serialize m_clalloc() */
680 static boolean_t mb_clalloc_busy
;
681 static void *mb_clalloc_waitchan
= &mb_clalloc_busy
;
682 static int mb_clalloc_waiters
;
684 static void mbuf_mtypes_sync(boolean_t
);
685 static int mbstat_sysctl SYSCTL_HANDLER_ARGS
;
686 static void mbuf_stat_sync(void);
687 static int mb_stat_sysctl SYSCTL_HANDLER_ARGS
;
688 static int mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS
;
689 static int mleak_table_sysctl SYSCTL_HANDLER_ARGS
;
690 static char *mbuf_dump(void);
691 static void mbuf_table_init(void);
692 static inline void m_incref(struct mbuf
*);
693 static inline u_int16_t
m_decref(struct mbuf
*);
694 static int m_clalloc(const u_int32_t
, const int, const u_int32_t
);
695 static void mbuf_worker_thread_init(void);
696 static mcache_obj_t
*slab_alloc(mbuf_class_t
, int);
697 static void slab_free(mbuf_class_t
, mcache_obj_t
*);
698 static unsigned int mbuf_slab_alloc(void *, mcache_obj_t
***,
700 static void mbuf_slab_free(void *, mcache_obj_t
*, int);
701 static void mbuf_slab_audit(void *, mcache_obj_t
*, boolean_t
);
702 static void mbuf_slab_notify(void *, u_int32_t
);
703 static unsigned int cslab_alloc(mbuf_class_t
, mcache_obj_t
***,
705 static unsigned int cslab_free(mbuf_class_t
, mcache_obj_t
*, int);
706 static unsigned int mbuf_cslab_alloc(void *, mcache_obj_t
***,
708 static void mbuf_cslab_free(void *, mcache_obj_t
*, int);
709 static void mbuf_cslab_audit(void *, mcache_obj_t
*, boolean_t
);
710 static int freelist_populate(mbuf_class_t
, unsigned int, int);
711 static void freelist_init(mbuf_class_t
);
712 static boolean_t
mbuf_cached_above(mbuf_class_t
, int);
713 static boolean_t
mbuf_steal(mbuf_class_t
, unsigned int);
714 static void m_reclaim(mbuf_class_t
, unsigned int, boolean_t
);
715 static int m_howmany(int, size_t);
716 static void mbuf_worker_thread(void);
717 static void mbuf_watchdog(void);
718 static boolean_t
mbuf_sleep(mbuf_class_t
, unsigned int, int);
720 static void mcl_audit_init(void *, mcache_audit_t
**, mcache_obj_t
**,
721 size_t, unsigned int);
722 static void mcl_audit_free(void *, unsigned int);
723 static mcache_audit_t
*mcl_audit_buf2mca(mbuf_class_t
, mcache_obj_t
*);
724 static void mcl_audit_mbuf(mcache_audit_t
*, void *, boolean_t
, boolean_t
);
725 static void mcl_audit_cluster(mcache_audit_t
*, void *, size_t, boolean_t
,
727 static void mcl_audit_restore_mbuf(struct mbuf
*, mcache_audit_t
*, boolean_t
);
728 static void mcl_audit_save_mbuf(struct mbuf
*, mcache_audit_t
*);
729 static void mcl_audit_scratch(mcache_audit_t
*);
730 static void mcl_audit_mcheck_panic(struct mbuf
*);
731 static void mcl_audit_verify_nextptr(void *, mcache_audit_t
*);
733 static void mleak_activate(void);
734 static void mleak_logger(u_int32_t
, mcache_obj_t
*, boolean_t
);
735 static boolean_t
mleak_log(uintptr_t *, mcache_obj_t
*, uint32_t, int);
736 static void mleak_free(mcache_obj_t
*);
737 static void mleak_sort_traces(void);
738 static void mleak_update_stats(void);
740 static mcl_slab_t
*slab_get(void *);
741 static void slab_init(mcl_slab_t
*, mbuf_class_t
, u_int32_t
,
742 void *, void *, unsigned int, int, int);
743 static void slab_insert(mcl_slab_t
*, mbuf_class_t
);
744 static void slab_remove(mcl_slab_t
*, mbuf_class_t
);
745 static boolean_t
slab_inrange(mcl_slab_t
*, void *);
746 static void slab_nextptr_panic(mcl_slab_t
*, void *);
747 static void slab_detach(mcl_slab_t
*);
748 static boolean_t
slab_is_detached(mcl_slab_t
*);
750 static int m_copyback0(struct mbuf
**, int, int, const void *, int, int);
751 static struct mbuf
*m_split0(struct mbuf
*, int, int, int);
752 __private_extern__
void mbuf_report_peak_usage(void);
753 static boolean_t
mbuf_report_usage(mbuf_class_t
);
755 /* flags for m_copyback0 */
756 #define M_COPYBACK0_COPYBACK 0x0001 /* copyback from cp */
757 #define M_COPYBACK0_PRESERVE 0x0002 /* preserve original data */
758 #define M_COPYBACK0_COW 0x0004 /* do copy-on-write */
759 #define M_COPYBACK0_EXTEND 0x0008 /* extend chain */
762 * This flag is set for all mbufs that come out of and into the composite
763 * mbuf + cluster caches, i.e. MC_MBUF_CL and MC_MBUF_BIGCL. mbufs that
764 * are marked with such a flag have clusters attached to them, and will be
765 * treated differently when they are freed; instead of being placed back
766 * into the mbuf and cluster freelists, the composite mbuf + cluster objects
767 * are placed back into the appropriate composite cache's freelist, and the
768 * actual freeing is deferred until the composite objects are purged. At
769 * such a time, this flag will be cleared from the mbufs and the objects
770 * will be freed into their own separate freelists.
772 #define EXTF_COMPOSITE 0x1
775 * This flag indicates that the external cluster is read-only, i.e. it is
776 * or was referred to by more than one mbufs. Once set, this flag is never
779 #define EXTF_READONLY 0x2
781 * This flag indicates that the external cluster is paired with the mbuf.
782 * Pairing implies an external free routine defined which will be invoked
783 * when the reference count drops to the minimum at m_free time. This
784 * flag is never cleared.
786 #define EXTF_PAIRED 0x4
789 (EXTF_COMPOSITE | EXTF_READONLY | EXTF_PAIRED)
791 #define MEXT_MINREF(m) ((m_get_rfa(m))->minref)
792 #define MEXT_REF(m) ((m_get_rfa(m))->refcnt)
793 #define MEXT_PREF(m) ((m_get_rfa(m))->prefcnt)
794 #define MEXT_FLAGS(m) ((m_get_rfa(m))->flags)
795 #define MEXT_PRIV(m) ((m_get_rfa(m))->priv)
796 #define MEXT_PMBUF(m) ((m_get_rfa(m))->paired)
797 #define MEXT_TOKEN(m) ((m_get_rfa(m))->ext_token)
798 #define MBUF_IS_COMPOSITE(m) \
799 (MEXT_REF(m) == MEXT_MINREF(m) && \
800 (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_COMPOSITE)
802 * This macro can be used to test if the mbuf is paired to an external
803 * cluster. The test for MEXT_PMBUF being equal to the mbuf in subject
804 * is important, as EXTF_PAIRED alone is insufficient since it is immutable,
805 * and thus survives calls to m_free_paired.
807 #define MBUF_IS_PAIRED(m) \
808 (((m)->m_flags & M_EXT) && \
809 (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_PAIRED && \
810 MEXT_PMBUF(m) == (m))
813 * Macros used to verify the integrity of the mbuf.
815 #define _MCHECK(m) { \
816 if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) { \
817 if (mclaudit == NULL) \
818 panic("MCHECK: m_type=%d m=%p", \
819 (u_int16_t)(m)->m_type, m); \
821 mcl_audit_mcheck_panic(m); \
825 #define MBUF_IN_MAP(addr) \
826 ((unsigned char *)(addr) >= mbutl && \
827 (unsigned char *)(addr) < embutl)
829 #define MRANGE(addr) { \
830 if (!MBUF_IN_MAP(addr)) \
831 panic("MRANGE: address out of range 0x%p", addr); \
835 * Macro version of mtod.
837 #define MTOD(m, t) ((t)((m)->m_data))
840 * Macros to obtain page index given a base cluster address
842 #define MTOPG(x) (((unsigned char *)x - mbutl) >> PAGE_SHIFT)
843 #define PGTOM(x) (mbutl + (x << PAGE_SHIFT))
846 * Macro to find the mbuf index relative to a base.
848 #define MBPAGEIDX(c, m) \
849 (((unsigned char *)(m) - (unsigned char *)(c)) >> MSIZESHIFT)
852 * Same thing for 2KB cluster index.
854 #define CLPAGEIDX(c, m) \
855 (((unsigned char *)(m) - (unsigned char *)(c)) >> MCLSHIFT)
858 * Macro to find 4KB cluster index relative to a base
860 #define BCLPAGEIDX(c, m) \
861 (((unsigned char *)(m) - (unsigned char *)(c)) >> MBIGCLSHIFT)
864 * Macros used during mbuf and cluster initialization.
866 #define MBUF_INIT_PKTHDR(m) { \
867 (m)->m_pkthdr.rcvif = NULL; \
868 (m)->m_pkthdr.pkt_hdr = NULL; \
869 (m)->m_pkthdr.len = 0; \
870 (m)->m_pkthdr.csum_flags = 0; \
871 (m)->m_pkthdr.csum_data = 0; \
872 (m)->m_pkthdr.vlan_tag = 0; \
873 m_classifier_init(m, 0); \
879 #define MBUF_INIT(m, pkthdr, type) { \
881 (m)->m_next = (m)->m_nextpkt = NULL; \
883 (m)->m_type = type; \
884 if ((pkthdr) == 0) { \
885 (m)->m_data = (m)->m_dat; \
888 (m)->m_data = (m)->m_pktdat; \
889 (m)->m_flags = M_PKTHDR; \
890 MBUF_INIT_PKTHDR(m); \
894 #define MEXT_INIT(m, buf, size, free, arg, rfa, min, ref, pref, flag, \
896 (m)->m_data = (m)->m_ext.ext_buf = (buf); \
897 (m)->m_flags |= M_EXT; \
898 m_set_ext((m), (rfa), (free), (arg)); \
899 (m)->m_ext.ext_size = (size); \
900 MEXT_MINREF(m) = (min); \
901 MEXT_REF(m) = (ref); \
902 MEXT_PREF(m) = (pref); \
903 MEXT_FLAGS(m) = (flag); \
904 MEXT_PRIV(m) = (priv); \
905 MEXT_PMBUF(m) = (pm); \
908 #define MBUF_CL_INIT(m, buf, rfa, ref, flag) \
909 MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, 0, \
910 ref, 0, flag, 0, NULL)
912 #define MBUF_BIGCL_INIT(m, buf, rfa, ref, flag) \
913 MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, 0, \
914 ref, 0, flag, 0, NULL)
916 #define MBUF_16KCL_INIT(m, buf, rfa, ref, flag) \
917 MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, 0, \
918 ref, 0, flag, 0, NULL)
921 * Macro to convert BSD malloc sleep flag to mcache's
923 #define MSLEEPF(f) ((!((f) & M_DONTWAIT)) ? MCR_SLEEP : MCR_NOSLEEP)
926 * The structure that holds all mbuf class statistics exportable via sysctl.
927 * Similar to mbstat structure, the mb_stat structure is protected by the
928 * global mbuf lock. It contains additional information about the classes
929 * that allows for a more accurate view of the state of the allocator.
931 struct mb_stat
*mb_stat
;
932 struct omb_stat
*omb_stat
; /* For backwards compatibility */
934 #define MB_STAT_SIZE(n) \
935 ((size_t)(&((mb_stat_t *)0)->mbs_class[n]))
936 #define OMB_STAT_SIZE(n) \
937 ((size_t)(&((struct omb_stat *)0)->mbs_class[n]))
940 * The legacy structure holding all of the mbuf allocation statistics.
941 * The actual statistics used by the kernel are stored in the mbuf_table
942 * instead, and are updated atomically while the global mbuf lock is held.
943 * They are mirrored in mbstat to support legacy applications (e.g. netstat).
944 * Unlike before, the kernel no longer relies on the contents of mbstat for
945 * its operations (e.g. cluster expansion) because the structure is exposed
946 * to outside and could possibly be modified, therefore making it unsafe.
947 * With the exception of the mbstat.m_mtypes array (see below), all of the
948 * statistics are updated as they change.
950 struct mbstat mbstat
;
952 #define MBSTAT_MTYPES_MAX \
953 (sizeof (mbstat.m_mtypes) / sizeof (mbstat.m_mtypes[0]))
956 * Allocation statistics related to mbuf types (up to MT_MAX-1) are updated
957 * atomically and stored in a per-CPU structure which is lock-free; this is
958 * done in order to avoid writing to the global mbstat data structure which
959 * would cause false sharing. During sysctl request for kern.ipc.mbstat,
960 * the statistics across all CPUs will be converged into the mbstat.m_mtypes
961 * array and returned to the application. Any updates for types greater or
962 * equal than MT_MAX would be done atomically to the mbstat; this slows down
963 * performance but is okay since the kernel uses only up to MT_MAX-1 while
964 * anything beyond that (up to type 255) is considered a corner case.
967 unsigned int cpu_mtypes
[MT_MAX
];
968 } __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE
), packed
)) mtypes_cpu_t
;
971 mtypes_cpu_t mbs_cpu
[1];
974 static mbuf_mtypes_t
*mbuf_mtypes
; /* per-CPU statistics */
976 #define MBUF_MTYPES_SIZE(n) \
977 ((size_t)(&((mbuf_mtypes_t *)0)->mbs_cpu[n]))
979 #define MTYPES_CPU(p) \
980 ((mtypes_cpu_t *)(void *)((char *)(p) + MBUF_MTYPES_SIZE(cpu_number())))
982 #define mtype_stat_add(type, n) { \
983 if ((unsigned)(type) < MT_MAX) { \
984 mtypes_cpu_t *mbs = MTYPES_CPU(mbuf_mtypes); \
985 atomic_add_32(&mbs->cpu_mtypes[type], n); \
986 } else if ((unsigned)(type) < (unsigned)MBSTAT_MTYPES_MAX) { \
987 atomic_add_16((int16_t *)&mbstat.m_mtypes[type], n); \
991 #define mtype_stat_sub(t, n) mtype_stat_add(t, -(n))
992 #define mtype_stat_inc(t) mtype_stat_add(t, 1)
993 #define mtype_stat_dec(t) mtype_stat_sub(t, 1)
996 mbuf_mtypes_sync(boolean_t locked
)
1002 lck_mtx_assert(mbuf_mlock
, LCK_MTX_ASSERT_OWNED
);
1004 bzero(&mtc
, sizeof (mtc
));
1005 for (m
= 0; m
< ncpu
; m
++) {
1006 mtypes_cpu_t
*scp
= &mbuf_mtypes
->mbs_cpu
[m
];
1009 bcopy(&scp
->cpu_mtypes
, &temp
.cpu_mtypes
,
1010 sizeof (temp
.cpu_mtypes
));
1012 for (n
= 0; n
< MT_MAX
; n
++)
1013 mtc
.cpu_mtypes
[n
] += temp
.cpu_mtypes
[n
];
1016 lck_mtx_lock(mbuf_mlock
);
1017 for (n
= 0; n
< MT_MAX
; n
++)
1018 mbstat
.m_mtypes
[n
] = mtc
.cpu_mtypes
[n
];
1020 lck_mtx_unlock(mbuf_mlock
);
1024 mbstat_sysctl SYSCTL_HANDLER_ARGS
1026 #pragma unused(oidp, arg1, arg2)
1027 mbuf_mtypes_sync(FALSE
);
1029 return (SYSCTL_OUT(req
, &mbstat
, sizeof (mbstat
)));
1033 mbuf_stat_sync(void)
1035 mb_class_stat_t
*sp
;
1040 lck_mtx_assert(mbuf_mlock
, LCK_MTX_ASSERT_OWNED
);
1042 for (k
= 0; k
< NELEM(mbuf_table
); k
++) {
1044 ccp
= &cp
->mc_cpu
[0];
1045 bktsize
= ccp
->cc_bktsize
;
1046 sp
= mbuf_table
[k
].mtbl_stats
;
1048 if (cp
->mc_flags
& MCF_NOCPUCACHE
)
1049 sp
->mbcl_mc_state
= MCS_DISABLED
;
1050 else if (cp
->mc_purge_cnt
> 0)
1051 sp
->mbcl_mc_state
= MCS_PURGING
;
1052 else if (bktsize
== 0)
1053 sp
->mbcl_mc_state
= MCS_OFFLINE
;
1055 sp
->mbcl_mc_state
= MCS_ONLINE
;
1057 sp
->mbcl_mc_cached
= 0;
1058 for (m
= 0; m
< ncpu
; m
++) {
1059 ccp
= &cp
->mc_cpu
[m
];
1060 if (ccp
->cc_objs
> 0)
1061 sp
->mbcl_mc_cached
+= ccp
->cc_objs
;
1062 if (ccp
->cc_pobjs
> 0)
1063 sp
->mbcl_mc_cached
+= ccp
->cc_pobjs
;
1065 sp
->mbcl_mc_cached
+= (cp
->mc_full
.bl_total
* bktsize
);
1066 sp
->mbcl_active
= sp
->mbcl_total
- sp
->mbcl_mc_cached
-
1069 sp
->mbcl_mc_waiter_cnt
= cp
->mc_waiter_cnt
;
1070 sp
->mbcl_mc_wretry_cnt
= cp
->mc_wretry_cnt
;
1071 sp
->mbcl_mc_nwretry_cnt
= cp
->mc_nwretry_cnt
;
1073 /* Calculate total count specific to each class */
1074 sp
->mbcl_ctotal
= sp
->mbcl_total
;
1075 switch (m_class(k
)) {
1077 /* Deduct mbufs used in composite caches */
1078 sp
->mbcl_ctotal
-= (m_total(MC_MBUF_CL
) +
1079 m_total(MC_MBUF_BIGCL
));
1083 /* Deduct clusters used in composite cache */
1084 sp
->mbcl_ctotal
-= m_total(MC_MBUF_CL
);
1088 /* Deduct clusters used in composite cache */
1089 sp
->mbcl_ctotal
-= m_total(MC_MBUF_BIGCL
);
1093 /* Deduct clusters used in composite cache */
1094 sp
->mbcl_ctotal
-= m_total(MC_MBUF_16KCL
);
1104 mb_stat_sysctl SYSCTL_HANDLER_ARGS
1106 #pragma unused(oidp, arg1, arg2)
1108 int k
, statsz
, proc64
= proc_is64bit(req
->p
);
1110 lck_mtx_lock(mbuf_mlock
);
1114 struct omb_class_stat
*oc
;
1115 struct mb_class_stat
*c
;
1117 omb_stat
->mbs_cnt
= mb_stat
->mbs_cnt
;
1118 oc
= &omb_stat
->mbs_class
[0];
1119 c
= &mb_stat
->mbs_class
[0];
1120 for (k
= 0; k
< omb_stat
->mbs_cnt
; k
++, oc
++, c
++) {
1121 (void) snprintf(oc
->mbcl_cname
, sizeof (oc
->mbcl_cname
),
1122 "%s", c
->mbcl_cname
);
1123 oc
->mbcl_size
= c
->mbcl_size
;
1124 oc
->mbcl_total
= c
->mbcl_total
;
1125 oc
->mbcl_active
= c
->mbcl_active
;
1126 oc
->mbcl_infree
= c
->mbcl_infree
;
1127 oc
->mbcl_slab_cnt
= c
->mbcl_slab_cnt
;
1128 oc
->mbcl_alloc_cnt
= c
->mbcl_alloc_cnt
;
1129 oc
->mbcl_free_cnt
= c
->mbcl_free_cnt
;
1130 oc
->mbcl_notified
= c
->mbcl_notified
;
1131 oc
->mbcl_purge_cnt
= c
->mbcl_purge_cnt
;
1132 oc
->mbcl_fail_cnt
= c
->mbcl_fail_cnt
;
1133 oc
->mbcl_ctotal
= c
->mbcl_ctotal
;
1134 oc
->mbcl_release_cnt
= c
->mbcl_release_cnt
;
1135 oc
->mbcl_mc_state
= c
->mbcl_mc_state
;
1136 oc
->mbcl_mc_cached
= c
->mbcl_mc_cached
;
1137 oc
->mbcl_mc_waiter_cnt
= c
->mbcl_mc_waiter_cnt
;
1138 oc
->mbcl_mc_wretry_cnt
= c
->mbcl_mc_wretry_cnt
;
1139 oc
->mbcl_mc_nwretry_cnt
= c
->mbcl_mc_nwretry_cnt
;
1142 statsz
= OMB_STAT_SIZE(NELEM(mbuf_table
));
1145 statsz
= MB_STAT_SIZE(NELEM(mbuf_table
));
1148 lck_mtx_unlock(mbuf_mlock
);
1150 return (SYSCTL_OUT(req
, statp
, statsz
));
1154 mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS
1156 #pragma unused(oidp, arg1, arg2)
1159 /* Ensure leak tracing turned on */
1160 if (!mclfindleak
|| !mclexpleak
)
1163 lck_mtx_lock(mleak_lock
);
1164 mleak_update_stats();
1165 i
= SYSCTL_OUT(req
, mleak_stat
, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES
));
1166 lck_mtx_unlock(mleak_lock
);
1172 mleak_table_sysctl SYSCTL_HANDLER_ARGS
1174 #pragma unused(oidp, arg1, arg2)
1177 /* Ensure leak tracing turned on */
1178 if (!mclfindleak
|| !mclexpleak
)
1181 lck_mtx_lock(mleak_lock
);
1182 i
= SYSCTL_OUT(req
, &mleak_table
, sizeof (mleak_table
));
1183 lck_mtx_unlock(mleak_lock
);
1189 m_incref(struct mbuf
*m
)
1192 volatile UInt16
*addr
= (volatile UInt16
*)&MEXT_REF(m
);
1198 } while (!OSCompareAndSwap16(old
, new, addr
));
1201 * If cluster is shared, mark it with (sticky) EXTF_READONLY;
1202 * we don't clear the flag when the refcount goes back to the
1203 * minimum, to simplify code calling m_mclhasreference().
1205 if (new > (MEXT_MINREF(m
) + 1) && !(MEXT_FLAGS(m
) & EXTF_READONLY
))
1206 (void) OSBitOrAtomic16(EXTF_READONLY
, &MEXT_FLAGS(m
));
1209 static inline u_int16_t
1210 m_decref(struct mbuf
*m
)
1213 volatile UInt16
*addr
= (volatile UInt16
*)&MEXT_REF(m
);
1219 } while (!OSCompareAndSwap16(old
, new, addr
));
1225 mbuf_table_init(void)
1227 unsigned int b
, c
, s
;
1228 int m
, config_mbuf_jumbo
= 0;
1230 MALLOC(omb_stat
, struct omb_stat
*, OMB_STAT_SIZE(NELEM(mbuf_table
)),
1231 M_TEMP
, M_WAITOK
| M_ZERO
);
1232 VERIFY(omb_stat
!= NULL
);
1234 MALLOC(mb_stat
, mb_stat_t
*, MB_STAT_SIZE(NELEM(mbuf_table
)),
1235 M_TEMP
, M_WAITOK
| M_ZERO
);
1236 VERIFY(mb_stat
!= NULL
);
1238 mb_stat
->mbs_cnt
= NELEM(mbuf_table
);
1239 for (m
= 0; m
< NELEM(mbuf_table
); m
++)
1240 mbuf_table
[m
].mtbl_stats
= &mb_stat
->mbs_class
[m
];
1242 #if CONFIG_MBUF_JUMBO
1243 config_mbuf_jumbo
= 1;
1244 #endif /* CONFIG_MBUF_JUMBO */
1246 if (config_mbuf_jumbo
== 1 || PAGE_SIZE
== M16KCLBYTES
) {
1248 * Set aside 1/3 of the mbuf cluster map for jumbo
1249 * clusters; we do this only on platforms where jumbo
1250 * cluster pool is enabled.
1252 njcl
= nmbclusters
/ 3;
1253 njclbytes
= M16KCLBYTES
;
1257 * nclusters holds both the 2KB and 4KB pools, so ensure it's
1258 * a multiple of 4KB clusters.
1260 nclusters
= P2ROUNDDOWN(nmbclusters
- njcl
, NCLPG
);
1263 * Each jumbo cluster takes 8 2KB clusters, so make
1264 * sure that the pool size is evenly divisible by 8;
1265 * njcl is in 2KB unit, hence treated as such.
1267 njcl
= P2ROUNDDOWN(nmbclusters
- nclusters
, NCLPJCL
);
1269 /* Update nclusters with rounded down value of njcl */
1270 nclusters
= P2ROUNDDOWN(nmbclusters
- njcl
, NCLPG
);
1274 * njcl is valid only on platforms with 16KB jumbo clusters or
1275 * with 16KB pages, where it is configured to 1/3 of the pool
1276 * size. On these platforms, the remaining is used for 2KB
1277 * and 4KB clusters. On platforms without 16KB jumbo clusters,
1278 * the entire pool is used for both 2KB and 4KB clusters. A 4KB
1279 * cluster can either be splitted into 16 mbufs, or into 2 2KB
1282 * +---+---+------------ ... -----------+------- ... -------+
1283 * | c | b | s | njcl |
1284 * +---+---+------------ ... -----------+------- ... -------+
1286 * 1/32th of the shared region is reserved for pure 2KB and 4KB
1287 * clusters (1/64th each.)
1289 c
= P2ROUNDDOWN((nclusters
>> 6), NCLPG
); /* in 2KB unit */
1290 b
= P2ROUNDDOWN((nclusters
>> (6 + NCLPBGSHIFT
)), NBCLPG
); /* in 4KB unit */
1291 s
= nclusters
- (c
+ (b
<< NCLPBGSHIFT
)); /* in 2KB unit */
1294 * 1/64th (c) is reserved for 2KB clusters.
1296 m_minlimit(MC_CL
) = c
;
1297 m_maxlimit(MC_CL
) = s
+ c
; /* in 2KB unit */
1298 m_maxsize(MC_CL
) = m_size(MC_CL
) = MCLBYTES
;
1299 (void) snprintf(m_cname(MC_CL
), MAX_MBUF_CNAME
, "cl");
1302 * Another 1/64th (b) of the map is reserved for 4KB clusters.
1303 * It cannot be turned into 2KB clusters or mbufs.
1305 m_minlimit(MC_BIGCL
) = b
;
1306 m_maxlimit(MC_BIGCL
) = (s
>> NCLPBGSHIFT
) + b
; /* in 4KB unit */
1307 m_maxsize(MC_BIGCL
) = m_size(MC_BIGCL
) = MBIGCLBYTES
;
1308 (void) snprintf(m_cname(MC_BIGCL
), MAX_MBUF_CNAME
, "bigcl");
1311 * The remaining 31/32ths (s) are all-purpose (mbufs, 2KB, or 4KB)
1313 m_minlimit(MC_MBUF
) = 0;
1314 m_maxlimit(MC_MBUF
) = (s
<< NMBPCLSHIFT
); /* in mbuf unit */
1315 m_maxsize(MC_MBUF
) = m_size(MC_MBUF
) = MSIZE
;
1316 (void) snprintf(m_cname(MC_MBUF
), MAX_MBUF_CNAME
, "mbuf");
1319 * Set limits for the composite classes.
1321 m_minlimit(MC_MBUF_CL
) = 0;
1322 m_maxlimit(MC_MBUF_CL
) = m_maxlimit(MC_CL
);
1323 m_maxsize(MC_MBUF_CL
) = MCLBYTES
;
1324 m_size(MC_MBUF_CL
) = m_size(MC_MBUF
) + m_size(MC_CL
);
1325 (void) snprintf(m_cname(MC_MBUF_CL
), MAX_MBUF_CNAME
, "mbuf_cl");
1327 m_minlimit(MC_MBUF_BIGCL
) = 0;
1328 m_maxlimit(MC_MBUF_BIGCL
) = m_maxlimit(MC_BIGCL
);
1329 m_maxsize(MC_MBUF_BIGCL
) = MBIGCLBYTES
;
1330 m_size(MC_MBUF_BIGCL
) = m_size(MC_MBUF
) + m_size(MC_BIGCL
);
1331 (void) snprintf(m_cname(MC_MBUF_BIGCL
), MAX_MBUF_CNAME
, "mbuf_bigcl");
1334 * And for jumbo classes.
1336 m_minlimit(MC_16KCL
) = 0;
1337 m_maxlimit(MC_16KCL
) = (njcl
>> NCLPJCLSHIFT
); /* in 16KB unit */
1338 m_maxsize(MC_16KCL
) = m_size(MC_16KCL
) = M16KCLBYTES
;
1339 (void) snprintf(m_cname(MC_16KCL
), MAX_MBUF_CNAME
, "16kcl");
1341 m_minlimit(MC_MBUF_16KCL
) = 0;
1342 m_maxlimit(MC_MBUF_16KCL
) = m_maxlimit(MC_16KCL
);
1343 m_maxsize(MC_MBUF_16KCL
) = M16KCLBYTES
;
1344 m_size(MC_MBUF_16KCL
) = m_size(MC_MBUF
) + m_size(MC_16KCL
);
1345 (void) snprintf(m_cname(MC_MBUF_16KCL
), MAX_MBUF_CNAME
, "mbuf_16kcl");
1348 * Initialize the legacy mbstat structure.
1350 bzero(&mbstat
, sizeof (mbstat
));
1351 mbstat
.m_msize
= m_maxsize(MC_MBUF
);
1352 mbstat
.m_mclbytes
= m_maxsize(MC_CL
);
1353 mbstat
.m_minclsize
= MINCLSIZE
;
1354 mbstat
.m_mlen
= MLEN
;
1355 mbstat
.m_mhlen
= MHLEN
;
1356 mbstat
.m_bigmclbytes
= m_maxsize(MC_BIGCL
);
1359 #if defined(__LP64__)
1360 typedef struct ncl_tbl
{
1361 uint64_t nt_maxmem
; /* memory (sane) size */
1362 uint32_t nt_mbpool
; /* mbuf pool size */
1366 static ncl_tbl_t ncl_table
[] = {
1367 { (1ULL << GBSHIFT
) /* 1 GB */, (64 << MBSHIFT
) /* 64 MB */ },
1368 { (1ULL << (GBSHIFT
+ 3)) /* 8 GB */, (96 << MBSHIFT
) /* 96 MB */ },
1369 { (1ULL << (GBSHIFT
+ 4)) /* 16 GB */, (128 << MBSHIFT
) /* 128 MB */ },
1374 static ncl_tbl_t ncl_table_srv
[] = {
1375 { (1ULL << GBSHIFT
) /* 1 GB */, (96 << MBSHIFT
) /* 96 MB */ },
1376 { (1ULL << (GBSHIFT
+ 2)) /* 4 GB */, (128 << MBSHIFT
) /* 128 MB */ },
1377 { (1ULL << (GBSHIFT
+ 3)) /* 8 GB */, (160 << MBSHIFT
) /* 160 MB */ },
1378 { (1ULL << (GBSHIFT
+ 4)) /* 16 GB */, (192 << MBSHIFT
) /* 192 MB */ },
1379 { (1ULL << (GBSHIFT
+ 5)) /* 32 GB */, (256 << MBSHIFT
) /* 256 MB */ },
1380 { (1ULL << (GBSHIFT
+ 6)) /* 64 GB */, (384 << MBSHIFT
) /* 384 MB */ },
1383 #endif /* __LP64__ */
1385 __private_extern__
unsigned int
1386 mbuf_default_ncl(int server
, uint64_t mem
)
1388 #if !defined(__LP64__)
1389 #pragma unused(server)
1392 * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM).
1394 if ((n
= ((mem
/ 16) / MCLBYTES
)) > 32768)
1398 ncl_tbl_t
*tbl
= (server
? ncl_table_srv
: ncl_table
);
1400 * 64-bit kernel (mbuf pool size based on table).
1402 n
= tbl
[0].nt_mbpool
;
1403 for (i
= 0; tbl
[i
].nt_mbpool
!= 0; i
++) {
1404 if (mem
< tbl
[i
].nt_maxmem
)
1406 n
= tbl
[i
].nt_mbpool
;
1409 #endif /* !__LP64__ */
1413 __private_extern__
void
1417 unsigned int initmcl
= 0;
1419 thread_t thread
= THREAD_NULL
;
1421 microuptime(&mb_start
);
1424 * These MBUF_ values must be equal to their private counterparts.
1426 _CASSERT(MBUF_EXT
== M_EXT
);
1427 _CASSERT(MBUF_PKTHDR
== M_PKTHDR
);
1428 _CASSERT(MBUF_EOR
== M_EOR
);
1429 _CASSERT(MBUF_LOOP
== M_LOOP
);
1430 _CASSERT(MBUF_BCAST
== M_BCAST
);
1431 _CASSERT(MBUF_MCAST
== M_MCAST
);
1432 _CASSERT(MBUF_FRAG
== M_FRAG
);
1433 _CASSERT(MBUF_FIRSTFRAG
== M_FIRSTFRAG
);
1434 _CASSERT(MBUF_LASTFRAG
== M_LASTFRAG
);
1435 _CASSERT(MBUF_PROMISC
== M_PROMISC
);
1436 _CASSERT(MBUF_HASFCS
== M_HASFCS
);
1438 _CASSERT(MBUF_TYPE_FREE
== MT_FREE
);
1439 _CASSERT(MBUF_TYPE_DATA
== MT_DATA
);
1440 _CASSERT(MBUF_TYPE_HEADER
== MT_HEADER
);
1441 _CASSERT(MBUF_TYPE_SOCKET
== MT_SOCKET
);
1442 _CASSERT(MBUF_TYPE_PCB
== MT_PCB
);
1443 _CASSERT(MBUF_TYPE_RTABLE
== MT_RTABLE
);
1444 _CASSERT(MBUF_TYPE_HTABLE
== MT_HTABLE
);
1445 _CASSERT(MBUF_TYPE_ATABLE
== MT_ATABLE
);
1446 _CASSERT(MBUF_TYPE_SONAME
== MT_SONAME
);
1447 _CASSERT(MBUF_TYPE_SOOPTS
== MT_SOOPTS
);
1448 _CASSERT(MBUF_TYPE_FTABLE
== MT_FTABLE
);
1449 _CASSERT(MBUF_TYPE_RIGHTS
== MT_RIGHTS
);
1450 _CASSERT(MBUF_TYPE_IFADDR
== MT_IFADDR
);
1451 _CASSERT(MBUF_TYPE_CONTROL
== MT_CONTROL
);
1452 _CASSERT(MBUF_TYPE_OOBDATA
== MT_OOBDATA
);
1454 _CASSERT(MBUF_TSO_IPV4
== CSUM_TSO_IPV4
);
1455 _CASSERT(MBUF_TSO_IPV6
== CSUM_TSO_IPV6
);
1456 _CASSERT(MBUF_CSUM_REQ_SUM16
== CSUM_PARTIAL
);
1457 _CASSERT(MBUF_CSUM_TCP_SUM16
== MBUF_CSUM_REQ_SUM16
);
1458 _CASSERT(MBUF_CSUM_REQ_IP
== CSUM_IP
);
1459 _CASSERT(MBUF_CSUM_REQ_TCP
== CSUM_TCP
);
1460 _CASSERT(MBUF_CSUM_REQ_UDP
== CSUM_UDP
);
1461 _CASSERT(MBUF_CSUM_REQ_TCPIPV6
== CSUM_TCPIPV6
);
1462 _CASSERT(MBUF_CSUM_REQ_UDPIPV6
== CSUM_UDPIPV6
);
1463 _CASSERT(MBUF_CSUM_DID_IP
== CSUM_IP_CHECKED
);
1464 _CASSERT(MBUF_CSUM_IP_GOOD
== CSUM_IP_VALID
);
1465 _CASSERT(MBUF_CSUM_DID_DATA
== CSUM_DATA_VALID
);
1466 _CASSERT(MBUF_CSUM_PSEUDO_HDR
== CSUM_PSEUDO_HDR
);
1468 _CASSERT(MBUF_WAITOK
== M_WAIT
);
1469 _CASSERT(MBUF_DONTWAIT
== M_DONTWAIT
);
1470 _CASSERT(MBUF_COPYALL
== M_COPYALL
);
1472 _CASSERT(MBUF_SC2TC(MBUF_SC_BK_SYS
) == MBUF_TC_BK
);
1473 _CASSERT(MBUF_SC2TC(MBUF_SC_BK
) == MBUF_TC_BK
);
1474 _CASSERT(MBUF_SC2TC(MBUF_SC_BE
) == MBUF_TC_BE
);
1475 _CASSERT(MBUF_SC2TC(MBUF_SC_RD
) == MBUF_TC_BE
);
1476 _CASSERT(MBUF_SC2TC(MBUF_SC_OAM
) == MBUF_TC_BE
);
1477 _CASSERT(MBUF_SC2TC(MBUF_SC_AV
) == MBUF_TC_VI
);
1478 _CASSERT(MBUF_SC2TC(MBUF_SC_RV
) == MBUF_TC_VI
);
1479 _CASSERT(MBUF_SC2TC(MBUF_SC_VI
) == MBUF_TC_VI
);
1480 _CASSERT(MBUF_SC2TC(MBUF_SC_VO
) == MBUF_TC_VO
);
1481 _CASSERT(MBUF_SC2TC(MBUF_SC_CTL
) == MBUF_TC_VO
);
1483 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BK
) == SCVAL_BK
);
1484 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BE
) == SCVAL_BE
);
1485 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VI
) == SCVAL_VI
);
1486 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VO
) == SCVAL_VO
);
1488 /* Module specific scratch space (32-bit alignment requirement) */
1489 _CASSERT(!(offsetof(struct mbuf
, m_pkthdr
.pkt_mpriv
) %
1490 sizeof (uint32_t)));
1492 /* Initialize random red zone cookie value */
1493 _CASSERT(sizeof (mb_redzone_cookie
) ==
1494 sizeof (((struct pkthdr
*)0)->redzone
));
1495 read_random(&mb_redzone_cookie
, sizeof (mb_redzone_cookie
));
1496 read_random(&mb_obscure_extref
, sizeof (mb_obscure_extref
));
1497 read_random(&mb_obscure_extfree
, sizeof (mb_obscure_extfree
));
1498 mb_obscure_extref
|= 0x3;
1499 mb_obscure_extfree
|= 0x3;
1501 /* Make sure we don't save more than we should */
1502 _CASSERT(MCA_SAVED_MBUF_SIZE
<= sizeof (struct mbuf
));
1504 if (nmbclusters
== 0)
1505 nmbclusters
= NMBCLUSTERS
;
1507 /* This should be a sane (at least even) value by now */
1508 VERIFY(nmbclusters
!= 0 && !(nmbclusters
& 0x1));
1510 /* Setup the mbuf table */
1513 /* Global lock for common layer */
1514 mbuf_mlock_grp_attr
= lck_grp_attr_alloc_init();
1515 mbuf_mlock_grp
= lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr
);
1516 mbuf_mlock_attr
= lck_attr_alloc_init();
1517 lck_mtx_init(mbuf_mlock
, mbuf_mlock_grp
, mbuf_mlock_attr
);
1520 * Allocate cluster slabs table:
1522 * maxslabgrp = (N * 2048) / (1024 * 1024)
1524 * Where N is nmbclusters rounded up to the nearest 512. This yields
1525 * mcl_slab_g_t units, each one representing a MB of memory.
1528 (P2ROUNDUP(nmbclusters
, (MBSIZE
>> MCLSHIFT
)) << MCLSHIFT
) >> MBSHIFT
;
1529 MALLOC(slabstbl
, mcl_slabg_t
**, maxslabgrp
* sizeof (mcl_slabg_t
*),
1530 M_TEMP
, M_WAITOK
| M_ZERO
);
1531 VERIFY(slabstbl
!= NULL
);
1534 * Allocate audit structures, if needed:
1536 * maxclaudit = (maxslabgrp * 1024 * 1024) / PAGE_SIZE
1538 * This yields mcl_audit_t units, each one representing a page.
1540 PE_parse_boot_argn("mbuf_debug", &mbuf_debug
, sizeof (mbuf_debug
));
1541 mbuf_debug
	    |= mcache_getflags();
	if (mbuf_debug & MCF_DEBUG) {
		int l;
		mcl_audit_t *mclad;

		maxclaudit = ((maxslabgrp << MBSHIFT) >> PAGE_SHIFT);
		MALLOC(mclaudit, mcl_audit_t *, maxclaudit * sizeof (*mclaudit),
		    M_TEMP, M_WAITOK | M_ZERO);
		VERIFY(mclaudit != NULL);
		for (l = 0, mclad = mclaudit; l < maxclaudit; l++) {
			MALLOC(mclad[l].cl_audit, mcache_audit_t **,
			    NMBPG * sizeof(mcache_audit_t *),
			    M_TEMP, M_WAITOK | M_ZERO);
			VERIFY(mclad[l].cl_audit != NULL);
		}

		mcl_audit_con_cache = mcache_create("mcl_audit_contents",
		    AUDIT_CONTENTS_SIZE, sizeof (u_int64_t), 0, MCR_SLEEP);
		VERIFY(mcl_audit_con_cache != NULL);
	}
	mclverify = (mbuf_debug & MCF_VERIFY);
	mcltrace = (mbuf_debug & MCF_TRACE);
	mclfindleak = !(mbuf_debug & MCF_NOLEAKLOG);
	mclexpleak = mclfindleak && (mbuf_debug & MCF_EXPLEAKLOG);

	/* Enable mbuf leak logging, with a lock to protect the tables */

	mleak_lock_grp_attr = lck_grp_attr_alloc_init();
	mleak_lock_grp = lck_grp_alloc_init("mleak_lock", mleak_lock_grp_attr);
	mleak_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(mleak_lock, mleak_lock_grp, mleak_lock_attr);

	/* Calculate the number of pages assigned to the cluster pool */
	mcl_pages = (nmbclusters << MCLSHIFT) / PAGE_SIZE;
	MALLOC(mcl_paddr, ppnum_t *, mcl_pages * sizeof (ppnum_t),
	    M_TEMP, M_WAITOK);
	VERIFY(mcl_paddr != NULL);

	/* Register with the I/O Bus mapper */
	mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
	bzero((char *)mcl_paddr, mcl_pages * sizeof (ppnum_t));

	embutl = (mbutl + (nmbclusters * MCLBYTES));
	VERIFY(((embutl - mbutl) % MBIGCLBYTES) == 0);

	/* Prime up the freelist */
	PE_parse_boot_argn("initmcl", &initmcl, sizeof (initmcl));

	initmcl >>= NCLPBGSHIFT;	/* become a 4K unit */
	if (initmcl > m_maxlimit(MC_BIGCL))
		initmcl = m_maxlimit(MC_BIGCL);
	if (initmcl < m_minlimit(MC_BIGCL))
		initmcl = m_minlimit(MC_BIGCL);

	lck_mtx_lock(mbuf_mlock);

	/*
	 * For classes with non-zero minimum limits, populate their freelists
	 * so that m_total(class) is at least m_minlimit(class).
	 */
	VERIFY(m_total(MC_BIGCL) == 0 && m_minlimit(MC_BIGCL) != 0);
	freelist_populate(m_class(MC_BIGCL), initmcl, M_WAIT);
	VERIFY(m_total(MC_BIGCL) >= m_minlimit(MC_BIGCL));
	freelist_init(m_class(MC_CL));

	for (m = 0; m < NELEM(mbuf_table); m++) {
		/* Make sure we didn't miss any */
		VERIFY(m_minlimit(m_class(m)) == 0 ||
		    m_total(m_class(m)) >= m_minlimit(m_class(m)));

		/* populate the initial sizes and report from there on */
		m_peak(m_class(m)) = m_total(m_class(m));
	}
	mb_peak_newreport = FALSE;

	lck_mtx_unlock(mbuf_mlock);

	(void) kernel_thread_start((thread_continue_t)mbuf_worker_thread_init,
	    NULL, &thread);
	thread_deallocate(thread);

	ref_cache = mcache_create("mext_ref", sizeof (struct ext_ref),
	    0, 0, MCR_SLEEP);

	/* Create the cache for each class */
	for (m = 0; m < NELEM(mbuf_table); m++) {
		void *allocfunc, *freefunc, *auditfunc, *logfunc;
		u_int32_t flags;

		flags = mbuf_debug;
		if (m_class(m) == MC_MBUF_CL || m_class(m) == MC_MBUF_BIGCL ||
		    m_class(m) == MC_MBUF_16KCL) {
			allocfunc = mbuf_cslab_alloc;
			freefunc = mbuf_cslab_free;
			auditfunc = mbuf_cslab_audit;
			logfunc = mleak_logger;
		} else {
			allocfunc = mbuf_slab_alloc;
			freefunc = mbuf_slab_free;
			auditfunc = mbuf_slab_audit;
			logfunc = mleak_logger;
		}

		/*
		 * Disable per-CPU caches for jumbo classes if there
		 * is no jumbo cluster pool available in the system.
		 * The cache itself is still created (but will never
		 * be populated) since it simplifies the code.
		 */
		if ((m_class(m) == MC_MBUF_16KCL || m_class(m) == MC_16KCL) &&
		    njcl == 0)
			flags |= MCF_NOCPUCACHE;

		if (!mclfindleak)
			flags |= MCF_NOLEAKLOG;

		m_cache(m) = mcache_create_ext(m_cname(m), m_maxsize(m),
		    allocfunc, freefunc, auditfunc, logfunc, mbuf_slab_notify,
		    (void *)(uintptr_t)m, flags, MCR_SLEEP);
	}

	/*
	 * Allocate structure for per-CPU statistics that's aligned
	 * on the CPU cache boundary; this code assumes that we never
	 * uninitialize this framework, since the original address
	 * before alignment is not saved.
	 */
	ncpu = ml_get_max_cpus();
	MALLOC(buf, void *, MBUF_MTYPES_SIZE(ncpu) + CPU_CACHE_LINE_SIZE,
	    M_TEMP, M_WAITOK);
	VERIFY(buf != NULL);

	mbuf_mtypes = (mbuf_mtypes_t *)P2ROUNDUP((intptr_t)buf,
	    CPU_CACHE_LINE_SIZE);
	bzero(mbuf_mtypes, MBUF_MTYPES_SIZE(ncpu));

	/*
	 * Set the max limit on sb_max to be 1/16 th of the size of
	 * memory allocated for mbuf clusters.
	 */
	high_sb_max = (nmbclusters << (MCLSHIFT - 4));
	if (high_sb_max < sb_max) {
		/* sb_max is too large for this configuration, scale it down */
		if (high_sb_max > (1 << MBSHIFT)) {
			/* We have at least 16 MB of mbuf pool */
			sb_max = high_sb_max;
		} else if ((nmbclusters << MCLSHIFT) > (1 << MBSHIFT)) {
			/*
			 * If we have more than 1 MB of mbuf pool, cap the
			 * size of the max sock buf at 1 MB.
			 */
			sb_max = high_sb_max = (1 << MBSHIFT);
		} else {
			sb_max = high_sb_max;
		}
	}

	/* allocate space for mbuf_dump_buf */
	MALLOC(mbuf_dump_buf, char *, MBUF_DUMP_BUF_SIZE, M_TEMP, M_WAITOK);
	VERIFY(mbuf_dump_buf != NULL);

	if (mbuf_debug & MCF_DEBUG) {
		printf("%s: MLEN %d, MHLEN %d\n", __func__,
		    (int)_MLEN, (int)_MHLEN);
	}

	printf("%s: done [%d MB total pool size, (%d/%d) split]\n", __func__,
	    (nmbclusters << MCLSHIFT) >> MBSHIFT,
	    (nclusters << MCLSHIFT) >> MBSHIFT,
	    (njcl << MCLSHIFT) >> MBSHIFT);

	/* initialize the lock for the tx completion callback table */
	mbuf_tx_compl_tbl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (mbuf_tx_compl_tbl_lck_grp_attr == NULL) {
		panic("%s: lck_grp_attr_alloc_init failed", __func__);
		/* NOTREACHED */
	}
	mbuf_tx_compl_tbl_lck_grp = lck_grp_alloc_init("mbuf_tx_compl_tbl",
	    mbuf_tx_compl_tbl_lck_grp_attr);
	if (mbuf_tx_compl_tbl_lck_grp == NULL) {
		panic("%s: lck_grp_alloc_init failed", __func__);
		/* NOTREACHED */
	}
	mbuf_tx_compl_tbl_lck_attr = lck_attr_alloc_init();
	if (mbuf_tx_compl_tbl_lck_attr == NULL) {
		panic("%s: lck_attr_alloc_init failed", __func__);
		/* NOTREACHED */
	}
	lck_rw_init(mbuf_tx_compl_tbl_lock, mbuf_tx_compl_tbl_lck_grp,
	    mbuf_tx_compl_tbl_lck_attr);
}
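
/*
 * Illustrative sketch (not part of the allocator): the per-CPU statistics
 * buffer above is cache-line aligned by over-allocating CPU_CACHE_LINE_SIZE
 * extra bytes and rounding the start address up with P2ROUNDUP().  The
 * helper below shows that same idiom in isolation; its name and parameters
 * are assumptions for illustration only.  As in mbinit(), the original
 * (unaligned) pointer is not kept, so the allocation can never be freed.
 */
#if 0	/* example only; never compiled */
static void *
example_cache_aligned_alloc(size_t payload, size_t line_size)
{
	void *raw;

	/* Over-allocate by one cache line so the rounded pointer stays in bounds */
	MALLOC(raw, void *, payload + line_size, M_TEMP, M_WAITOK);
	if (raw == NULL)
		return (NULL);

	/* P2ROUNDUP() bumps the address to the next multiple of line_size */
	return ((void *)P2ROUNDUP((intptr_t)raw, line_size));
}
#endif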
/*
 * Obtain a slab of object(s) from the class's freelist.
 */
static mcache_obj_t *
slab_alloc(mbuf_class_t class, int wait)
{
	mcl_slab_t *sp;
	mcache_obj_t *buf;

	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	/* This should always be NULL for us */
	VERIFY(m_cobjlist(class) == NULL);

	/*
	 * Treat composite objects as having longer lifespan by using
	 * a slab from the reverse direction, in the hope that this could
	 * reduce the probability of fragmentation for slabs that hold
	 * more than one buffer chunk (e.g. mbuf slabs). For other
	 * slabs, this probably doesn't make much of a difference.
	 */
	if ((class == MC_MBUF || class == MC_CL || class == MC_BIGCL)
	    && (wait & MCR_COMP))
		sp = (mcl_slab_t *)TAILQ_LAST(&m_slablist(class), mcl_slhead);
	else
		sp = (mcl_slab_t *)TAILQ_FIRST(&m_slablist(class));

	if (sp == NULL) {
		VERIFY(m_infree(class) == 0 && m_slab_cnt(class) == 0);
		/* The slab list for this class is empty */
		return (NULL);
	}

	VERIFY(m_infree(class) > 0);
	VERIFY(!slab_is_detached(sp));
	VERIFY(sp->sl_class == class &&
	    (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
	buf = sp->sl_head;
	VERIFY(slab_inrange(sp, buf) && sp == slab_get(buf));
	sp->sl_head = buf->obj_next;
	/* Increment slab reference */
	sp->sl_refcnt++;

	VERIFY(sp->sl_head != NULL || sp->sl_refcnt == sp->sl_chunks);

	if (sp->sl_head != NULL && !slab_inrange(sp, sp->sl_head)) {
		slab_nextptr_panic(sp, sp->sl_head);
		/* In case sl_head is in the map but not in the slab */
		VERIFY(slab_inrange(sp, sp->sl_head));
		/* NOTREACHED */
	}

	if (mclaudit != NULL) {
		mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
		mca->mca_uflags = 0;
		/* Save contents on mbuf objects only */
		if (class == MC_MBUF)
			mca->mca_uflags |= MB_SCVALID;
	}

	if (class == MC_CL) {
		mbstat.m_clfree = (--m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
		/*
		 * A 2K cluster slab can have at most NCLPG references.
		 */
		VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NCLPG &&
		    sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
		VERIFY(sp->sl_refcnt < NCLPG || sp->sl_head == NULL);
	} else if (class == MC_BIGCL) {
		mbstat.m_bigclfree = (--m_infree(MC_BIGCL)) +
		    m_infree(MC_MBUF_BIGCL);
		/*
		 * A 4K cluster slab can have NBCLPG references.
		 */
		VERIFY(sp->sl_refcnt >= 1 && sp->sl_chunks == NBCLPG &&
		    sp->sl_len == PAGE_SIZE &&
		    (sp->sl_refcnt < NBCLPG || sp->sl_head == NULL));
	} else if (class == MC_16KCL) {
		mcl_slab_t *nsp;
		int k;

		--m_infree(MC_16KCL);
		VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 &&
		    sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
		/*
		 * Increment 2nd-Nth slab reference, where N is NSLABSP16KB.
		 * A 16KB big cluster takes NSLABSP16KB slabs, each having at
		 * most one reference.
		 */
		for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
			nsp = nsp->sl_next;
			/* Next slab must already be present */
			VERIFY(nsp != NULL);
			nsp->sl_refcnt++;
			VERIFY(!slab_is_detached(nsp));
			VERIFY(nsp->sl_class == MC_16KCL &&
			    nsp->sl_flags == (SLF_MAPPED | SLF_PARTIAL) &&
			    nsp->sl_refcnt == 1 && nsp->sl_chunks == 0 &&
			    nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
			    nsp->sl_head == NULL);
		}
	} else {
		VERIFY(class == MC_MBUF);
		--m_infree(MC_MBUF);
		/*
		 * If auditing is turned on, this check is
		 * deferred until later in mbuf_slab_audit().
		 */
		if (mclaudit == NULL)
			_MCHECK((struct mbuf *)buf);
		/*
		 * Since we have incremented the reference count above,
		 * an mbuf slab (formerly a 4KB cluster slab that was cut
		 * up into mbufs) must have a reference count between 1
		 * and NMBPG at this point.
		 */
		VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NMBPG &&
		    sp->sl_chunks == NMBPG && sp->sl_len == PAGE_SIZE);
		VERIFY(sp->sl_refcnt < NMBPG || sp->sl_head == NULL);
	}

	/* If empty, remove this slab from the class's freelist */
	if (sp->sl_head == NULL) {
		VERIFY(class != MC_MBUF || sp->sl_refcnt == NMBPG);
		VERIFY(class != MC_CL || sp->sl_refcnt == NCLPG);
		VERIFY(class != MC_BIGCL || sp->sl_refcnt == NBCLPG);
		slab_remove(sp, class);
	}

	return (buf);
}
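
/*
 * Worked example for the 16KB case handled above (a sketch, assuming 4KB
 * pages): NSLABSP16KB == M16KCLBYTES / PAGE_SIZE == 16384 / 4096 == 4, so
 * one 16KB cluster spans four page-sized slabs.  Only the first slab
 * carries the chunk (sl_chunks == 1, sl_len == 16KB); the remaining three
 * are SLF_PARTIAL placeholders whose sl_base points back at the first
 * slab's base, which is exactly what the VERIFYs in the loop above check.
 */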
/*
 * Place a slab of object(s) back into a class's slab list.
 */
static void
slab_free(mbuf_class_t class, mcache_obj_t *buf)
{
	mcl_slab_t *sp;
	boolean_t reinit_supercl = false;
	mbuf_class_t super_class;

	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	VERIFY(class != MC_16KCL || njcl > 0);
	VERIFY(buf->obj_next == NULL);

	sp = slab_get(buf);
	VERIFY(sp->sl_class == class && slab_inrange(sp, buf) &&
	    (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);

	/* Decrement slab reference */
	sp->sl_refcnt--;

	if (class == MC_CL) {
		VERIFY(IS_P2ALIGNED(buf, MCLBYTES));
		/*
		 * A slab that has been split for 2KB clusters can have
		 * at most 1 outstanding reference at this point.
		 */
		VERIFY(sp->sl_refcnt >= 0 && sp->sl_refcnt <= (NCLPG - 1) &&
		    sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
		VERIFY(sp->sl_refcnt < (NCLPG - 1) ||
		    (slab_is_detached(sp) && sp->sl_head == NULL));
	} else if (class == MC_BIGCL) {
		VERIFY(IS_P2ALIGNED(buf, MBIGCLBYTES));

		/* A 4KB cluster slab can have NBCLPG references at most */
		VERIFY(sp->sl_refcnt >= 0 && sp->sl_chunks == NBCLPG);
		VERIFY(sp->sl_refcnt < (NBCLPG - 1) ||
		    (slab_is_detached(sp) && sp->sl_head == NULL));
	} else if (class == MC_16KCL) {
		mcl_slab_t *nsp;
		int k;
		/*
		 * A 16KB cluster takes NSLABSP16KB slabs, all must
		 * now have 0 reference.
		 */
		VERIFY(IS_P2ALIGNED(buf, PAGE_SIZE));
		VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 &&
		    sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
		VERIFY(slab_is_detached(sp));
		for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
			nsp = nsp->sl_next;
			/* Next slab must already be present */
			VERIFY(nsp != NULL);
			nsp->sl_refcnt--;
			VERIFY(slab_is_detached(nsp));
			VERIFY(nsp->sl_class == MC_16KCL &&
			    (nsp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) &&
			    nsp->sl_refcnt == 0 && nsp->sl_chunks == 0 &&
			    nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
			    nsp->sl_head == NULL);
		}
	} else {
		/*
		 * A slab that has been split for mbufs has at most
		 * NMBPG reference counts. Since we have decremented
		 * one reference above, it must now be between 0 and
		 * NMBPG-1.
		 */
		VERIFY(class == MC_MBUF);
		VERIFY(sp->sl_refcnt >= 0 &&
		    sp->sl_refcnt <= (NMBPG - 1) &&
		    sp->sl_chunks == NMBPG && sp->sl_len == PAGE_SIZE);
		VERIFY(sp->sl_refcnt < (NMBPG - 1) ||
		    (slab_is_detached(sp) && sp->sl_head == NULL));
	}

	/*
	 * When auditing is enabled, ensure that the buffer still
	 * contains the free pattern. Otherwise it got corrupted
	 * while at the CPU cache layer.
	 */
	if (mclaudit != NULL) {
		mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
		if (mclverify) {
			mcache_audit_free_verify(mca, buf, 0,
			    m_maxsize(class));
		}
		mca->mca_uflags &= ~MB_SCVALID;
	}

	if (class == MC_CL) {
		mbstat.m_clfree = (++m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
		buf->obj_next = sp->sl_head;
	} else if (class == MC_BIGCL) {
		mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) +
		    m_infree(MC_MBUF_BIGCL);
		buf->obj_next = sp->sl_head;
	} else if (class == MC_16KCL) {
		++m_infree(MC_16KCL);
	} else {	/* MC_MBUF */
		++m_infree(MC_MBUF);
		buf->obj_next = sp->sl_head;
	}
	sp->sl_head = buf;

	/*
	 * If a slab has been split to either one which holds 2KB clusters,
	 * or one which holds mbufs, turn it back to one which holds a
	 * 4 or 16 KB cluster depending on the page size.
	 */
	if (m_maxsize(MC_BIGCL) == PAGE_SIZE) {
		super_class = MC_BIGCL;
	} else {
		VERIFY(PAGE_SIZE == m_maxsize(MC_16KCL));
		super_class = MC_16KCL;
	}

	if (class == MC_MBUF && sp->sl_refcnt == 0 &&
	    m_total(class) >= (m_minlimit(class) + NMBPG) &&
	    m_total(super_class) < m_maxlimit(super_class)) {
		int i = NMBPG;

		m_total(MC_MBUF) -= NMBPG;
		mbstat.m_mbufs = m_total(MC_MBUF);
		m_infree(MC_MBUF) -= NMBPG;
		mtype_stat_add(MT_FREE, -((unsigned)NMBPG));

		while (i--) {
			struct mbuf *m = sp->sl_head;
			VERIFY(m != NULL);
			sp->sl_head = m->m_next;
			m->m_next = NULL;
		}
		reinit_supercl = true;
	} else if (class == MC_CL && sp->sl_refcnt == 0 &&
	    m_total(class) >= (m_minlimit(class) + NCLPG) &&
	    m_total(super_class) < m_maxlimit(super_class)) {
		int i = NCLPG;

		m_total(MC_CL) -= NCLPG;
		mbstat.m_clusters = m_total(MC_CL);
		m_infree(MC_CL) -= NCLPG;

		while (i--) {
			union mcluster *c = sp->sl_head;
			VERIFY(c != NULL);
			sp->sl_head = c->mcl_next;
			c->mcl_next = NULL;
		}
		reinit_supercl = true;
	} else if (class == MC_BIGCL && super_class != MC_BIGCL &&
	    sp->sl_refcnt == 0 &&
	    m_total(class) >= (m_minlimit(class) + NBCLPG) &&
	    m_total(super_class) < m_maxlimit(super_class)) {
		int i = NBCLPG;

		VERIFY(super_class == MC_16KCL);
		m_total(MC_BIGCL) -= NBCLPG;
		mbstat.m_bigclusters = m_total(MC_BIGCL);
		m_infree(MC_BIGCL) -= NBCLPG;

		while (i--) {
			union mbigcluster *bc = sp->sl_head;
			VERIFY(bc != NULL);
			sp->sl_head = bc->mbc_next;
			bc->mbc_next = NULL;
		}
		reinit_supercl = true;
	}

	if (reinit_supercl) {
		VERIFY(sp->sl_head == NULL);
		VERIFY(m_total(class) >= m_minlimit(class));
		slab_remove(sp, class);

		/* Reinitialize it as a cluster for the super class */
		m_total(super_class)++;
		m_infree(super_class)++;
		VERIFY(sp->sl_flags == (SLF_MAPPED | SLF_DETACHED) &&
		    sp->sl_len == PAGE_SIZE && sp->sl_refcnt == 0);

		slab_init(sp, super_class, SLF_MAPPED, sp->sl_base,
		    sp->sl_base, PAGE_SIZE, 0, 1);
		if (mclverify) {
			mcache_set_pattern(MCACHE_FREE_PATTERN,
			    (caddr_t)sp->sl_base, sp->sl_len);
		}
		((mcache_obj_t *)(sp->sl_base))->obj_next = NULL;

		if (super_class == MC_BIGCL) {
			mbstat.m_bigclusters = m_total(MC_BIGCL);
			mbstat.m_bigclfree = m_infree(MC_BIGCL) +
			    m_infree(MC_MBUF_BIGCL);
		}

		VERIFY(slab_is_detached(sp));
		VERIFY(m_total(super_class) <= m_maxlimit(super_class));

		/* And finally switch class */
		class = super_class;
	}

	/* Reinsert the slab to the class's slab list */
	if (slab_is_detached(sp))
		slab_insert(sp, class);
}
/*
 * Common allocator for rudimentary objects called by the CPU cache layer
 * during an allocation request whenever there is no available element in the
 * bucket layer. It returns one or more elements from the appropriate global
 * freelist. If the freelist is empty, it will attempt to populate it and
 * retry the allocation.
 */
static unsigned int
mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait)
{
	mbuf_class_t class = (mbuf_class_t)arg;
	unsigned int need = num;
	mcache_obj_t **list = *plist;

	ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));

	lck_mtx_lock(mbuf_mlock);

	for (;;) {
		if ((*list = slab_alloc(class, wait)) != NULL) {
			(*list)->obj_next = NULL;
			list = *plist = &(*list)->obj_next;

			if (--need == 0) {
				/*
				 * If the number of elements in freelist has
				 * dropped below low watermark, asynchronously
				 * populate the freelist now rather than doing
				 * it later when we run out of elements.
				 */
				if (!mbuf_cached_above(class, wait) &&
				    m_infree(class) < (m_total(class) >> 5)) {
					(void) freelist_populate(class, 1,
					    M_DONTWAIT);
				}
				break;
			}
		} else {
			VERIFY(m_infree(class) == 0 || class == MC_CL);

			(void) freelist_populate(class, 1,
			    (wait & MCR_NOSLEEP) ? M_DONTWAIT : M_WAIT);

			if (m_infree(class) > 0)
				continue;

			/* Check if there's anything at the cache layer */
			if (mbuf_cached_above(class, wait))
				break;

			/* watchdog checkpoint */
			mbuf_watchdog();

			/* We have nothing and cannot block; give up */
			if (wait & MCR_NOSLEEP) {
				if (!(wait & MCR_TRYHARD)) {
					m_fail_cnt(class)++;
					break;
				}
			}

			/*
			 * If the freelist is still empty and the caller is
			 * willing to be blocked, sleep on the wait channel
			 * until an element is available. Otherwise, if
			 * MCR_TRYHARD is set, do our best to satisfy the
			 * request without having to go to sleep.
			 */
			if (mbuf_worker_ready &&
			    mbuf_sleep(class, need, wait))
				break;

			lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
		}
	}

	m_alloc_cnt(class) += num - need;
	lck_mtx_unlock(mbuf_mlock);

	return (num - need);
}
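
/*
 * Worked example for the low-watermark test above (a sketch): with
 * m_total(class) == 2048 objects, (m_total(class) >> 5) == 64, so the
 * asynchronous freelist_populate() kicks in once fewer than 64 objects
 * (roughly 3% of the class) remain free and nothing is cached above us.
 */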
/*
 * Common de-allocator for rudimentary objects called by the CPU cache
 * layer when one or more elements need to be returned to the appropriate
 * global freelist.
 */
static void
mbuf_slab_free(void *arg, mcache_obj_t *list, __unused int purged)
{
	mbuf_class_t class = (mbuf_class_t)arg;
	mcache_obj_t *nlist;
	unsigned int num = 0;
	int w;

	ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));

	lck_mtx_lock(mbuf_mlock);

	for (;;) {
		nlist = list->obj_next;
		list->obj_next = NULL;
		slab_free(class, list);
		++num;
		if ((list = nlist) == NULL)
			break;
	}
	m_free_cnt(class) += num;

	if ((w = mb_waiters) > 0)
		mb_waiters = 0;

	lck_mtx_unlock(mbuf_mlock);

	if (w != 0)
		wakeup(mb_waitchan);
}
/*
 * Common auditor for rudimentary objects called by the CPU cache layer
 * during an allocation or free request. For the former, this is called
 * after the objects are obtained from either the bucket or slab layer
 * and before they are returned to the caller. For the latter, this is
 * called immediately during free and before placing the objects into
 * the bucket or slab layer.
 */
static void
mbuf_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
{
	mbuf_class_t class = (mbuf_class_t)arg;
	mcache_audit_t *mca;

	ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));

	while (list != NULL) {
		lck_mtx_lock(mbuf_mlock);
		mca = mcl_audit_buf2mca(class, list);

		/* Do the sanity checks */
		if (class == MC_MBUF) {
			mcl_audit_mbuf(mca, list, FALSE, alloc);
			ASSERT(mca->mca_uflags & MB_SCVALID);
		} else {
			mcl_audit_cluster(mca, list, m_maxsize(class),
			    alloc, TRUE);
			ASSERT(!(mca->mca_uflags & MB_SCVALID));
		}
		/* Record this transaction */
		if (mcltrace)
			mcache_buffer_log(mca, list, m_cache(class), &mb_start);

		if (alloc)
			mca->mca_uflags |= MB_INUSE;
		else
			mca->mca_uflags &= ~MB_INUSE;
		/* Unpair the object (unconditionally) */
		mca->mca_uptr = NULL;
		lck_mtx_unlock(mbuf_mlock);

		list = list->obj_next;
	}
}
/*
 * Common notify routine for all caches. It is called by mcache when
 * one or more objects get freed. We use this indication to trigger
 * the wakeup of any sleeping threads so that they can retry their
 * allocation requests.
 */
static void
mbuf_slab_notify(void *arg, u_int32_t reason)
{
	mbuf_class_t class = (mbuf_class_t)arg;
	int w;

	ASSERT(MBUF_CLASS_VALID(class));

	if (reason != MCN_RETRYALLOC)
		return;

	lck_mtx_lock(mbuf_mlock);
	if ((w = mb_waiters) > 0) {
		m_notified(class)++;
		mb_waiters = 0;
	}
	lck_mtx_unlock(mbuf_mlock);

	if (w != 0)
		wakeup(mb_waitchan);
}
/*
 * Obtain object(s) from the composite class's freelist.
 */
static unsigned int
cslab_alloc(mbuf_class_t class, mcache_obj_t ***plist, unsigned int num)
{
	unsigned int need = num;
	mcl_slab_t *sp, *clsp, *nsp;
	struct mbuf *m;
	mcache_obj_t **list = *plist;
	void *cl;
	int k;

	VERIFY(class != MC_MBUF_16KCL || njcl > 0);
	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	/* Get what we can from the freelist */
	while ((*list = m_cobjlist(class)) != NULL) {
		m = (struct mbuf *)*list;
		cl = m->m_ext.ext_buf;
		clsp = slab_get(cl);
		VERIFY(m->m_flags == M_EXT && cl != NULL);
		VERIFY(m_get_rfa(m) != NULL && MBUF_IS_COMPOSITE(m));

		if (class == MC_MBUF_CL) {
			VERIFY(clsp->sl_refcnt >= 1 &&
			    clsp->sl_refcnt <= NCLPG);
		} else {
			VERIFY(clsp->sl_refcnt >= 1 &&
			    clsp->sl_refcnt <= NBCLPG);
		}

		if (class == MC_MBUF_16KCL) {
			for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
				nsp = nsp->sl_next;
				/* Next slab must already be present */
				VERIFY(nsp != NULL);
				VERIFY(nsp->sl_refcnt == 1);
			}
		}

		if ((m_cobjlist(class) = (*list)->obj_next) != NULL &&
		    !MBUF_IN_MAP(m_cobjlist(class))) {
			slab_nextptr_panic(sp, m_cobjlist(class));
			/* NOTREACHED */
		}
		(*list)->obj_next = NULL;
		list = *plist = &(*list)->obj_next;

		if (--need == 0)
			break;
	}
	m_infree(class) -= (num - need);

	return (num - need);
}
/*
 * Place object(s) back into a composite class's freelist.
 */
static unsigned int
cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged)
{
	mcache_obj_t *o, *tail;
	unsigned int num = 0;
	struct mbuf *m, *ms;
	mcache_audit_t *mca = NULL;
	mcache_obj_t *ref_list = NULL;
	mcl_slab_t *clsp, *nsp;
	void *cl;
	mbuf_class_t cl_class;

	ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
	VERIFY(class != MC_MBUF_16KCL || njcl > 0);
	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	if (class == MC_MBUF_CL) {
		cl_class = MC_CL;
	} else if (class == MC_MBUF_BIGCL) {
		cl_class = MC_BIGCL;
	} else {
		VERIFY(class == MC_MBUF_16KCL);
		cl_class = MC_16KCL;
	}

	o = tail = list;

	while ((m = ms = (struct mbuf *)o) != NULL) {
		mcache_obj_t *rfa, *nexto = o->obj_next;

		/* Do the mbuf sanity checks */
		if (mclaudit != NULL) {
			mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
			if (mclverify) {
				mcache_audit_free_verify(mca, m, 0,
				    m_maxsize(MC_MBUF));
			}
			ms = MCA_SAVED_MBUF_PTR(mca);
		}

		/* Do the cluster sanity checks */
		cl = ms->m_ext.ext_buf;
		clsp = slab_get(cl);
		if (mclverify) {
			size_t size = m_maxsize(cl_class);
			mcache_audit_free_verify(mcl_audit_buf2mca(cl_class,
			    (mcache_obj_t *)cl), cl, 0, size);
		}
		VERIFY(ms->m_type == MT_FREE);
		VERIFY(ms->m_flags == M_EXT);
		VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
		if (cl_class == MC_CL) {
			VERIFY(clsp->sl_refcnt >= 1 &&
			    clsp->sl_refcnt <= NCLPG);
		} else {
			VERIFY(clsp->sl_refcnt >= 1 &&
			    clsp->sl_refcnt <= NBCLPG);
		}
		if (cl_class == MC_16KCL) {
			int k;
			for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
				nsp = nsp->sl_next;
				/* Next slab must already be present */
				VERIFY(nsp != NULL);
				VERIFY(nsp->sl_refcnt == 1);
			}
		}

		/*
		 * If we're asked to purge, restore the actual mbuf using
		 * contents of the shadow structure (if auditing is enabled)
		 * and clear EXTF_COMPOSITE flag from the mbuf, as we are
		 * about to free it and the attached cluster into their caches.
		 */
		if (purged) {
			/* Restore constructed mbuf fields */
			if (mclaudit != NULL)
				mcl_audit_restore_mbuf(m, mca, TRUE);

			MEXT_FLAGS(m) = 0;
			MEXT_PMBUF(m) = NULL;

			rfa = (mcache_obj_t *)(void *)m_get_rfa(m);
			m_set_ext(m, NULL, NULL, NULL);
			rfa->obj_next = ref_list;
			ref_list = rfa;

			m->m_type = MT_FREE;
			m->m_flags = m->m_len = 0;
			m->m_next = m->m_nextpkt = NULL;

			/* Save mbuf fields and make auditing happy */
			if (mclaudit != NULL)
				mcl_audit_mbuf(mca, o, FALSE, FALSE);

			VERIFY(m_total(class) > 0);
			m_total(class)--;

			/* Free the mbuf */
			o->obj_next = NULL;
			slab_free(MC_MBUF, o);

			/* And free the cluster */
			((mcache_obj_t *)cl)->obj_next = NULL;
			if (class == MC_MBUF_CL)
				slab_free(MC_CL, cl);
			else if (class == MC_MBUF_BIGCL)
				slab_free(MC_BIGCL, cl);
			else
				slab_free(MC_16KCL, cl);
		}

		++num;
		tail = o;
		o = nexto;
	}

	if (!purged) {
		tail->obj_next = m_cobjlist(class);
		m_cobjlist(class) = list;
		m_infree(class) += num;
	} else if (ref_list != NULL) {
		mcache_free_ext(ref_cache, ref_list);
	}

	return (num);
}
/*
 * Common allocator for composite objects called by the CPU cache layer
 * during an allocation request whenever there is no available element in
 * the bucket layer. It returns one or more composite elements from the
 * appropriate global freelist. If the freelist is empty, it will attempt
 * to obtain the rudimentary objects from their caches and construct them
 * into composite mbuf + cluster objects.
 */
static unsigned int
mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed,
    int wait)
{
	mbuf_class_t class = (mbuf_class_t)arg;
	mbuf_class_t cl_class = 0;
	unsigned int num = 0, cnum = 0, want = needed;
	mcache_obj_t *ref_list = NULL;
	mcache_obj_t *mp_list = NULL;
	mcache_obj_t *clp_list = NULL;
	mcache_obj_t **list;
	struct ext_ref *rfa;
	struct mbuf *m;
	void *cl;

	ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));

	VERIFY(class != MC_MBUF_16KCL || njcl > 0);

	/* There should not be any slab for this class */
	VERIFY(m_slab_cnt(class) == 0 &&
	    m_slablist(class).tqh_first == NULL &&
	    m_slablist(class).tqh_last == NULL);

	lck_mtx_lock(mbuf_mlock);

	/* Try using the freelist first */
	num = cslab_alloc(class, plist, needed);

	if (num == needed) {
		m_alloc_cnt(class) += num;
		lck_mtx_unlock(mbuf_mlock);
		return (needed);
	}

	lck_mtx_unlock(mbuf_mlock);

	/*
	 * We could not satisfy the request using the freelist alone;
	 * allocate from the appropriate rudimentary caches and use
	 * whatever we can get to construct the composite objects.
	 */
	needed -= num;

	/*
	 * Mark these allocation requests as coming from a composite cache.
	 * Also, if the caller is willing to be blocked, mark the request
	 * with MCR_FAILOK such that we don't end up sleeping at the mbuf
	 * slab layer waiting for the individual object when one or more
	 * of the already-constructed composite objects are available.
	 */
	wait |= MCR_COMP;
	if (!(wait & MCR_NOSLEEP))
		wait |= MCR_FAILOK;

	/* allocate mbufs */
	needed = mcache_alloc_ext(m_cache(MC_MBUF), &mp_list, needed, wait);
	if (needed == 0) {
		ASSERT(mp_list == NULL);
		goto fail;
	}

	/* allocate clusters */
	if (class == MC_MBUF_CL) {
		cl_class = MC_CL;
	} else if (class == MC_MBUF_BIGCL) {
		cl_class = MC_BIGCL;
	} else {
		VERIFY(class == MC_MBUF_16KCL);
		cl_class = MC_16KCL;
	}
	needed = mcache_alloc_ext(m_cache(cl_class), &clp_list, needed, wait);
	if (needed == 0) {
		ASSERT(clp_list == NULL);
		goto fail;
	}

	needed = mcache_alloc_ext(ref_cache, &ref_list, needed, wait);
	if (needed == 0) {
		ASSERT(ref_list == NULL);
		goto fail;
	}

	/*
	 * By this time "needed" is MIN(mbuf, cluster, ref). Any leftovers
	 * will get freed accordingly before we return to caller.
	 */
	list = *plist;
	for (cnum = 0; cnum < needed; cnum++) {
		struct mbuf *ms;

		m = ms = (struct mbuf *)mp_list;
		mp_list = mp_list->obj_next;

		cl = clp_list;
		clp_list = clp_list->obj_next;
		((mcache_obj_t *)cl)->obj_next = NULL;

		rfa = (struct ext_ref *)ref_list;
		ref_list = ref_list->obj_next;
		((mcache_obj_t *)(void *)rfa)->obj_next = NULL;

		/*
		 * If auditing is enabled, construct the shadow mbuf
		 * in the audit structure instead of in the actual one.
		 * mbuf_cslab_audit() will take care of restoring the
		 * contents after the integrity check.
		 */
		if (mclaudit != NULL) {
			mcache_audit_t *mca, *cl_mca;

			lck_mtx_lock(mbuf_mlock);
			mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
			ms = MCA_SAVED_MBUF_PTR(mca);
			cl_mca = mcl_audit_buf2mca(cl_class,
			    (mcache_obj_t *)cl);

			/*
			 * Pair them up. Note that this is done at the time
			 * the mbuf+cluster objects are constructed. This
			 * information should be treated as "best effort"
			 * debugging hint since more than one mbufs can refer
			 * to a cluster. In that case, the cluster might not
			 * be freed along with the mbuf it was paired with.
			 */
			mca->mca_uptr = cl_mca;
			cl_mca->mca_uptr = mca;

			ASSERT(mca->mca_uflags & MB_SCVALID);
			ASSERT(!(cl_mca->mca_uflags & MB_SCVALID));
			lck_mtx_unlock(mbuf_mlock);

			/* Technically, they are in the freelist */
			if (mclverify) {
				size_t size;

				mcache_set_pattern(MCACHE_FREE_PATTERN, m,
				    m_maxsize(MC_MBUF));

				if (class == MC_MBUF_CL)
					size = m_maxsize(MC_CL);
				else if (class == MC_MBUF_BIGCL)
					size = m_maxsize(MC_BIGCL);
				else
					size = m_maxsize(MC_16KCL);

				mcache_set_pattern(MCACHE_FREE_PATTERN, cl,
				    size);
			}
		}

		MBUF_INIT(ms, 0, MT_FREE);
		if (class == MC_MBUF_16KCL) {
			MBUF_16KCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
		} else if (class == MC_MBUF_BIGCL) {
			MBUF_BIGCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
		} else {
			MBUF_CL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
		}
		VERIFY(ms->m_flags == M_EXT);
		VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));

		*list = (mcache_obj_t *)m;
		(*list)->obj_next = NULL;
		list = *plist = &(*list)->obj_next;
	}

fail:
	/*
	 * Free up what's left of the above.
	 */
	if (mp_list != NULL)
		mcache_free_ext(m_cache(MC_MBUF), mp_list);
	if (clp_list != NULL)
		mcache_free_ext(m_cache(cl_class), clp_list);
	if (ref_list != NULL)
		mcache_free_ext(ref_cache, ref_list);

	lck_mtx_lock(mbuf_mlock);
	if (num > 0 || cnum > 0) {
		m_total(class) += cnum;
		VERIFY(m_total(class) <= m_maxlimit(class));
		m_alloc_cnt(class) += num + cnum;
	}
	if ((num + cnum) < want)
		m_fail_cnt(class) += (want - (num + cnum));
	lck_mtx_unlock(mbuf_mlock);

	return (num + cnum);
}
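
/*
 * Illustrative sketch (not part of the allocator): mbuf_cslab_alloc() above
 * chains three raw allocations, feeding each returned count into the next
 * request, so the final "needed" is effectively MIN(mbufs, clusters, refs).
 * The helper below shows the same pattern against three hypothetical element
 * caches; all names are assumptions for illustration only.  Unlike the real
 * code, this sketch does not free the leftover elements on a shortfall.
 */
#if 0	/* example only; never compiled */
static unsigned int
example_min_chained_alloc(mcache_t *a, mcache_t *b, mcache_t *c,
    mcache_obj_t **la, mcache_obj_t **lb, mcache_obj_t **lc,
    unsigned int want, int wait)
{
	unsigned int got = want;

	/* Each call may return fewer than asked; pass the shortfall along */
	got = mcache_alloc_ext(a, la, got, wait);
	if (got == 0)
		return (0);
	got = mcache_alloc_ext(b, lb, got, wait);
	if (got == 0)
		return (0);
	got = mcache_alloc_ext(c, lc, got, wait);
	return (got);	/* == MIN of the three allocations */
}
#endif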
/*
 * Common de-allocator for composite objects called by the CPU cache
 * layer when one or more elements need to be returned to the appropriate
 * global freelist.
 */
static void
mbuf_cslab_free(void *arg, mcache_obj_t *list, int purged)
{
	mbuf_class_t class = (mbuf_class_t)arg;
	unsigned int num;
	int w;

	ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));

	lck_mtx_lock(mbuf_mlock);

	num = cslab_free(class, list, purged);
	m_free_cnt(class) += num;

	if ((w = mb_waiters) > 0)
		mb_waiters = 0;

	lck_mtx_unlock(mbuf_mlock);

	if (w != 0)
		wakeup(mb_waitchan);
}
/*
 * Common auditor for composite objects called by the CPU cache layer
 * during an allocation or free request. For the former, this is called
 * after the objects are obtained from either the bucket or slab layer
 * and before they are returned to the caller. For the latter, this is
 * called immediately during free and before placing the objects into
 * the bucket or slab layer.
 */
static void
mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
{
	mbuf_class_t class = (mbuf_class_t)arg, cl_class;
	mcache_audit_t *mca;
	struct mbuf *m, *ms;
	mcl_slab_t *clsp, *nsp;
	size_t cl_size;
	void *cl;

	ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
	if (class == MC_MBUF_CL)
		cl_class = MC_CL;
	else if (class == MC_MBUF_BIGCL)
		cl_class = MC_BIGCL;
	else
		cl_class = MC_16KCL;
	cl_size = m_maxsize(cl_class);

	while ((m = ms = (struct mbuf *)list) != NULL) {
		lck_mtx_lock(mbuf_mlock);
		/* Do the mbuf sanity checks and record its transaction */
		mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
		mcl_audit_mbuf(mca, m, TRUE, alloc);
		if (mcltrace)
			mcache_buffer_log(mca, m, m_cache(class), &mb_start);

		if (alloc)
			mca->mca_uflags |= MB_COMP_INUSE;
		else
			mca->mca_uflags &= ~MB_COMP_INUSE;

		/*
		 * Use the shadow mbuf in the audit structure if we are
		 * freeing, since the contents of the actual mbuf have been
		 * pattern-filled by the above call to mcl_audit_mbuf().
		 */
		if (!alloc && mclverify)
			ms = MCA_SAVED_MBUF_PTR(mca);

		/* Do the cluster sanity checks and record its transaction */
		cl = ms->m_ext.ext_buf;
		clsp = slab_get(cl);
		VERIFY(ms->m_flags == M_EXT && cl != NULL);
		VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
		if (class == MC_MBUF_CL)
			VERIFY(clsp->sl_refcnt >= 1 &&
			    clsp->sl_refcnt <= NCLPG);
		else
			VERIFY(clsp->sl_refcnt >= 1 &&
			    clsp->sl_refcnt <= NBCLPG);

		if (class == MC_MBUF_16KCL) {
			int k;
			for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
				nsp = nsp->sl_next;
				/* Next slab must already be present */
				VERIFY(nsp != NULL);
				VERIFY(nsp->sl_refcnt == 1);
			}
		}

		mca = mcl_audit_buf2mca(cl_class, cl);
		mcl_audit_cluster(mca, cl, cl_size, alloc, FALSE);
		if (mcltrace)
			mcache_buffer_log(mca, cl, m_cache(class), &mb_start);

		if (alloc)
			mca->mca_uflags |= MB_COMP_INUSE;
		else
			mca->mca_uflags &= ~MB_COMP_INUSE;
		lck_mtx_unlock(mbuf_mlock);

		list = list->obj_next;
	}
}
/*
 * Allocate some number of mbuf clusters and place on cluster freelist.
 */
static int
m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
{
	int i, count = 0;
	vm_size_t size = 0;
	int numpages = 0, large_buffer;
	vm_offset_t page = 0;
	mcache_audit_t *mca_list = NULL;
	mcache_obj_t *con_list = NULL;
	mcl_slab_t *sp;
	mbuf_class_t class;

	/* Set if a buffer allocation needs allocation of multiple pages */
	large_buffer = ((bufsize == m_maxsize(MC_16KCL)) &&
	    PAGE_SIZE < M16KCLBYTES);
	VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
	    bufsize == m_maxsize(MC_16KCL));

	VERIFY((bufsize == PAGE_SIZE) ||
	    (bufsize > PAGE_SIZE && bufsize == m_maxsize(MC_16KCL)));

	if (bufsize == m_size(MC_BIGCL))
		class = MC_BIGCL;
	else
		class = MC_16KCL;

	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	/*
	 * Multiple threads may attempt to populate the cluster map one
	 * after another. Since we drop the lock below prior to acquiring
	 * the physical page(s), our view of the cluster map may no longer
	 * be accurate, and we could end up over-committing the pages beyond
	 * the maximum allowed for each class. To prevent it, this entire
	 * operation (including the page mapping) is serialized.
	 */
	while (mb_clalloc_busy) {
		mb_clalloc_waiters++;
		(void) msleep(mb_clalloc_waitchan, mbuf_mlock,
		    (PZERO-1), "m_clalloc", NULL);
		lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
	}

	/* We are busy now; tell everyone else to go away */
	mb_clalloc_busy = TRUE;

	/*
	 * Honor the caller's wish to block or not block. We have a way
	 * to grow the pool asynchronously using the mbuf worker thread.
	 */
	i = m_howmany(num, bufsize);
	if (i == 0 || (wait & M_DONTWAIT))
		goto out;

	lck_mtx_unlock(mbuf_mlock);

	size = round_page(i * bufsize);
	page = kmem_mb_alloc(mb_map, size, large_buffer);

	/*
	 * If we did ask for "n" 16KB physically contiguous chunks
	 * and didn't get them, then please try again without this
	 * restriction.
	 */
	if (large_buffer && page == 0)
		page = kmem_mb_alloc(mb_map, size, 0);

	if (page == 0) {
		if (bufsize == m_maxsize(MC_BIGCL)) {
			/* Try for 1 page if failed */
			size = PAGE_SIZE;
			page = kmem_mb_alloc(mb_map, size, 0);
		}

		if (page == 0) {
			lck_mtx_lock(mbuf_mlock);
			goto out;
		}
	}

	VERIFY(IS_P2ALIGNED(page, PAGE_SIZE));
	numpages = size / PAGE_SIZE;

	/* If auditing is enabled, allocate the audit structures now */
	if (mclaudit != NULL) {
		int needed;

		/*
		 * Yes, I realize this is a waste of memory for clusters
		 * that never get transformed into mbufs, as we may end
		 * up with NMBPG-1 unused audit structures per cluster.
		 * But doing so tremendously simplifies the allocation
		 * strategy, since at this point we are not holding the
		 * mbuf lock and the caller is okay to be blocked.
		 */
		if (bufsize == PAGE_SIZE) {
			needed = numpages * NMBPG;

			i = mcache_alloc_ext(mcl_audit_con_cache,
			    &con_list, needed, MCR_SLEEP);

			VERIFY(con_list != NULL && i == needed);
		} else {
			/*
			 * if multiple 4K pages are being used for a
			 * 16K cluster
			 */
			needed = numpages / NSLABSP16KB;
		}

		i = mcache_alloc_ext(mcache_audit_cache,
		    (mcache_obj_t **)&mca_list, needed, MCR_SLEEP);

		VERIFY(mca_list != NULL && i == needed);
	}

	lck_mtx_lock(mbuf_mlock);

	for (i = 0; i < numpages; i++, page += PAGE_SIZE) {
		ppnum_t offset =
		    ((unsigned char *)page - mbutl) >> PAGE_SHIFT;
		ppnum_t new_page = pmap_find_phys(kernel_pmap, page);

		/*
		 * If there is a mapper the appropriate I/O page is
		 * returned; zero out the page to discard its past
		 * contents to prevent exposing leftover kernel memory.
		 */
		VERIFY(offset < mcl_pages);
		if (mcl_paddr_base != 0) {
			bzero((void *)(uintptr_t) page, PAGE_SIZE);
			new_page = IOMapperInsertPage(mcl_paddr_base,
			    offset, new_page);
		}
		mcl_paddr[offset] = new_page;

		/* Pattern-fill this fresh page */
		if (mclverify) {
			mcache_set_pattern(MCACHE_FREE_PATTERN,
			    (caddr_t)page, PAGE_SIZE);
		}
		if (bufsize == PAGE_SIZE) {
			mcache_obj_t *buf;

			/* One for the entire page */
			sp = slab_get((void *)page);
			if (mclaudit != NULL) {
				mcl_audit_init((void *)page,
				    &mca_list, &con_list,
				    AUDIT_CONTENTS_SIZE, NMBPG);
			}
			VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
			slab_init(sp, class, SLF_MAPPED, (void *)page,
			    (void *)page, PAGE_SIZE, 0, 1);
			buf = (mcache_obj_t *)page;
			buf->obj_next = NULL;

			/* Insert this slab */
			slab_insert(sp, class);

			/* Update stats now since slab_get drops the lock */
			++m_infree(class);
			++m_total(class);
			VERIFY(m_total(class) <= m_maxlimit(class));
			if (class == MC_BIGCL) {
				mbstat.m_bigclfree = m_infree(MC_BIGCL) +
				    m_infree(MC_MBUF_BIGCL);
				mbstat.m_bigclusters = m_total(MC_BIGCL);
			}
			++count;
		} else if ((bufsize > PAGE_SIZE) &&
		    (i % NSLABSP16KB) == 0) {
			union m16kcluster *m16kcl = (union m16kcluster *)page;
			mcl_slab_t *nsp;
			int k;

			/* One for the entire 16KB */
			sp = slab_get(m16kcl);
			if (mclaudit != NULL)
				mcl_audit_init(m16kcl, &mca_list, NULL, 0, 1);

			VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
			slab_init(sp, MC_16KCL, SLF_MAPPED,
			    m16kcl, m16kcl, bufsize, 0, 1);
			m16kcl->m16kcl_next = NULL;

			/*
			 * 2nd-Nth page's slab is part of the first one,
			 * where N is NSLABSP16KB.
			 */
			for (k = 1; k < NSLABSP16KB; k++) {
				nsp = slab_get(((union mbigcluster *)page) + k);
				VERIFY(nsp->sl_refcnt == 0 &&
				    nsp->sl_flags == 0);
				slab_init(nsp, MC_16KCL,
				    SLF_MAPPED | SLF_PARTIAL,
				    m16kcl, NULL, 0, 0, 0);
			}
			/* Insert this slab */
			slab_insert(sp, MC_16KCL);

			/* Update stats now since slab_get drops the lock */
			++m_infree(MC_16KCL);
			++m_total(MC_16KCL);
			VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
			++count;
		}
	}
	VERIFY(mca_list == NULL && con_list == NULL);

	if (!mb_peak_newreport && mbuf_report_usage(class))
		mb_peak_newreport = TRUE;

	/* We're done; let others enter */
	mb_clalloc_busy = FALSE;
	if (mb_clalloc_waiters > 0) {
		mb_clalloc_waiters = 0;
		wakeup(mb_clalloc_waitchan);
	}

	return (count);
out:
	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	/* We're done; let others enter */
	mb_clalloc_busy = FALSE;
	if (mb_clalloc_waiters > 0) {
		mb_clalloc_waiters = 0;
		wakeup(mb_clalloc_waitchan);
	}

	/*
	 * When non-blocking we kick a thread if we have to grow the
	 * pool or if the number of free clusters is less than requested.
	 */
	if (i > 0 && mbuf_worker_ready && mbuf_worker_needs_wakeup) {
		wakeup((caddr_t)&mbuf_worker_needs_wakeup);
		mbuf_worker_needs_wakeup = FALSE;
	}
	if (class == MC_BIGCL) {
		if (i > 0) {
			/*
			 * Remember total number of 4KB clusters needed
			 * at this time.
			 */
			i += m_total(MC_BIGCL);
			if (i > mbuf_expand_big) {
				mbuf_expand_big = i;
			}
		}
		if (m_infree(MC_BIGCL) >= num)
			return (1);
	} else {
		if (i > 0) {
			/*
			 * Remember total number of 16KB clusters needed
			 * at this time.
			 */
			i += m_total(MC_16KCL);
			if (i > mbuf_expand_16k) {
				mbuf_expand_16k = i;
			}
		}
		if (m_infree(MC_16KCL) >= num)
			return (1);
	}
	return (0);
}
/*
 * Populate the global freelist of the corresponding buffer class.
 */
static int
freelist_populate(mbuf_class_t class, unsigned int num, int wait)
{
	mcache_obj_t *o = NULL;
	int i, numpages = 0, count;
	mbuf_class_t super_class;

	VERIFY(class == MC_MBUF || class == MC_CL || class == MC_BIGCL ||
	    class == MC_16KCL);

	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	VERIFY(PAGE_SIZE == m_maxsize(MC_BIGCL) ||
	    PAGE_SIZE == m_maxsize(MC_16KCL));

	if (m_maxsize(class) >= PAGE_SIZE)
		return(m_clalloc(num, wait, m_maxsize(class)) != 0);

	/*
	 * The rest of the function will allocate pages and will slice
	 * them up into the right size
	 */
	numpages = (num * m_size(class) + PAGE_SIZE - 1) / PAGE_SIZE;

	/* Currently assume that pages are 4K or 16K */
	if (PAGE_SIZE == m_maxsize(MC_BIGCL))
		super_class = MC_BIGCL;
	else
		super_class = MC_16KCL;

	i = m_clalloc(numpages, wait, m_maxsize(super_class));

	/* Respect the minimum limit of super class */
	if (m_total(super_class) == m_maxlimit(super_class) &&
	    m_infree(super_class) <= m_minlimit(super_class)) {
		if (wait & MCR_COMP)
			return (0);
	}

	/* how many objects will we cut the page into? */
	int numobj = PAGE_SIZE / m_maxsize(class);

	for (count = 0; count < numpages; count++) {
		/* respect totals, minlimit, maxlimit */
		if (m_total(super_class) <= m_minlimit(super_class) ||
		    m_total(class) >= m_maxlimit(class))
			break;

		if ((o = slab_alloc(super_class, wait)) == NULL)
			break;

		struct mbuf *m = (struct mbuf *)o;
		union mcluster *c = (union mcluster *)o;
		union mbigcluster *mbc = (union mbigcluster *)o;
		mcl_slab_t *sp = slab_get(o);
		mcache_audit_t *mca = NULL;

		/*
		 * since one full page will be converted to MC_MBUF or
		 * MC_CL, verify that the reference count will match that
		 * assumption
		 */
		VERIFY(sp->sl_refcnt == 1 && slab_is_detached(sp));
		VERIFY((sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
		/*
		 * Make sure that the cluster is unmolested
		 * while in freelist
		 */
		if (mclverify) {
			mca = mcl_audit_buf2mca(super_class,
			    (mcache_obj_t *)o);
			mcache_audit_free_verify(mca,
			    (mcache_obj_t *)o, 0, m_maxsize(super_class));
		}

		/* Reinitialize it as an mbuf or 2K or 4K slab */
		slab_init(sp, class, sp->sl_flags,
		    sp->sl_base, NULL, PAGE_SIZE, 0, numobj);

		VERIFY(sp->sl_head == NULL);

		VERIFY(m_total(super_class) >= 1);
		m_total(super_class)--;

		if (super_class == MC_BIGCL)
			mbstat.m_bigclusters = m_total(MC_BIGCL);

		m_total(class) += numobj;
		m_infree(class) += numobj;

		if (!mb_peak_newreport && mbuf_report_usage(class))
			mb_peak_newreport = TRUE;

		if (class == MC_MBUF) {
			mbstat.m_mbufs = m_total(MC_MBUF);
			mtype_stat_add(MT_FREE, NMBPG);
			for (i = 0; i < numobj; i++) {
				/*
				 * If auditing is enabled, construct the
				 * shadow mbuf in the audit structure
				 * instead of the actual one.
				 * mbuf_slab_audit() will take care of
				 * restoring the contents after the
				 * integrity check.
				 */
				if (mclaudit != NULL) {
					struct mbuf *ms;
					mca = mcl_audit_buf2mca(MC_MBUF,
					    (mcache_obj_t *)m);
					ms = MCA_SAVED_MBUF_PTR(mca);
					ms->m_type = MT_FREE;
				} else {
					m->m_type = MT_FREE;
				}
				m->m_next = sp->sl_head;
				sp->sl_head = (void *)m++;
			}
		} else if (class == MC_CL) { /* MC_CL */
			mbstat.m_clfree =
			    m_infree(MC_CL) + m_infree(MC_MBUF_CL);
			mbstat.m_clusters = m_total(MC_CL);
			for (i = 0; i < numobj; i++) {
				c->mcl_next = sp->sl_head;
				sp->sl_head = (void *)c++;
			}
		} else {
			VERIFY(class == MC_BIGCL);
			mbstat.m_bigclusters = m_total(MC_BIGCL);
			mbstat.m_bigclfree = m_infree(MC_BIGCL) +
			    m_infree(MC_MBUF_BIGCL);
			for (i = 0; i < numobj; i++) {
				mbc->mbc_next = sp->sl_head;
				sp->sl_head = (void *)mbc++;
			}
		}

		/* Insert into the mbuf or 2k or 4k slab list */
		slab_insert(sp, class);

		if ((i = mb_waiters) > 0)
			mb_waiters = 0;
		if (i != 0)
			wakeup(mb_waitchan);
	}
	return (count != 0);
}
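
/*
 * Worked example for the page-slicing math above (a sketch, assuming 4KB
 * pages and 256-byte mbufs): a request for num == 100 mbufs needs
 * numpages == (100 * 256 + 4095) / 4096 == 7 pages, and each page is cut
 * into numobj == 4096 / 256 == 16 objects, so up to 112 mbufs are added
 * (the loop stops early if the class or super-class limits are hit).
 */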
/*
 * For each class, initialize the freelist to hold m_minlimit() objects.
 */
static void
freelist_init(mbuf_class_t class)
{
	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	VERIFY(class == MC_CL || class == MC_BIGCL);
	VERIFY(m_total(class) == 0);
	VERIFY(m_minlimit(class) > 0);

	while (m_total(class) < m_minlimit(class))
		(void) freelist_populate(class, m_minlimit(class), M_WAIT);

	VERIFY(m_total(class) >= m_minlimit(class));
}
/*
 * (Inaccurately) check if it might be worth a trip back to the
 * mcache layer due the availability of objects there. We'll
 * end up back here if there's nothing up there.
 */
static boolean_t
mbuf_cached_above(mbuf_class_t class, int wait)
{
	switch (class) {
	case MC_MBUF:
		if (wait & MCR_COMP)
			return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL)) ||
			    !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL)));
		break;

	case MC_CL:
		if (wait & MCR_COMP)
			return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL)));
		break;

	case MC_BIGCL:
		if (wait & MCR_COMP)
			return (!mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL)));
		break;

	case MC_16KCL:
		if (wait & MCR_COMP)
			return (!mcache_bkt_isempty(m_cache(MC_MBUF_16KCL)));
		break;

	default:
		break;
	}

	return (!mcache_bkt_isempty(m_cache(class)));
}
/*
 * If possible, convert constructed objects to raw ones.
 */
static boolean_t
mbuf_steal(mbuf_class_t class, unsigned int num)
{
	mcache_obj_t *top = NULL;
	mcache_obj_t **list = &top;
	unsigned int tot = 0;

	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	switch (class) {
	case MC_MBUF_CL:
	case MC_MBUF_BIGCL:
	case MC_MBUF_16KCL:
		/* Get the required number of constructed objects if possible */
		if (m_infree(class) > m_minlimit(class)) {
			tot = cslab_alloc(class, &list,
			    MIN(num, m_infree(class)));
		}

		/* And destroy them to get back the raw objects */
		if (top != NULL)
			(void) cslab_free(class, top, 1);
		break;

	default:
		break;
	}

	return (tot == num);
}
static void
m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp)
{
	int m, bmap = 0;

	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL));
	VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
	VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));

	/*
	 * This logic can be made smarter; for now, simply mark
	 * all other related classes as potential victims.
	 */
	switch (class) {
	case MC_MBUF:
		m_wantpurge(MC_CL)++;
		m_wantpurge(MC_BIGCL)++;
		m_wantpurge(MC_MBUF_CL)++;
		m_wantpurge(MC_MBUF_BIGCL)++;
		break;

	case MC_CL:
		m_wantpurge(MC_MBUF)++;
		m_wantpurge(MC_BIGCL)++;
		m_wantpurge(MC_MBUF_BIGCL)++;
		if (!comp)
			m_wantpurge(MC_MBUF_CL)++;
		break;

	case MC_BIGCL:
		m_wantpurge(MC_MBUF)++;
		m_wantpurge(MC_CL)++;
		m_wantpurge(MC_MBUF_CL)++;
		if (!comp)
			m_wantpurge(MC_MBUF_BIGCL)++;
		break;

	case MC_16KCL:
		if (!comp)
			m_wantpurge(MC_MBUF_16KCL)++;
		break;

	default:
		break;
	}

	/*
	 * Run through each marked class and check if we really need to
	 * purge (and therefore temporarily disable) the per-CPU caches
	 * layer used by the class. If so, remember the classes since
	 * we are going to drop the lock below prior to purging.
	 */
	for (m = 0; m < NELEM(mbuf_table); m++) {
		if (m_wantpurge(m) > 0) {
			m_wantpurge(m) = 0;
			/*
			 * Try hard to steal the required number of objects
			 * from the freelist of other mbuf classes. Only
			 * purge and disable the per-CPU caches layer when
			 * we don't have enough; it's the last resort.
			 */
			if (!mbuf_steal(m, num))
				bmap |= (1 << m);
		}
	}

	lck_mtx_unlock(mbuf_mlock);

	if (bmap != 0) {
		/* signal the domains to drain */
		net_drain_domains();

		/* Sigh; we have no other choices but to ask mcache to purge */
		for (m = 0; m < NELEM(mbuf_table); m++) {
			if ((bmap & (1 << m)) &&
			    mcache_purge_cache(m_cache(m), TRUE)) {
				lck_mtx_lock(mbuf_mlock);
				m_purge_cnt(m)++;
				lck_mtx_unlock(mbuf_mlock);
			}
		}
	} else {
		/*
		 * Request mcache to reap extra elements from all of its caches;
		 * note that all reaps are serialized and happen only at a fixed
		 * interval.
		 */
		mcache_reap();
	}
	lck_mtx_lock(mbuf_mlock);
}
static inline struct mbuf *
m_get_common(int wait, short type, int hdr)
{
	struct mbuf *m;
	int mcflags = MSLEEPF(wait);

	/* Is this due to a non-blocking retry? If so, then try harder */
	if (mcflags & MCR_NOSLEEP)
		mcflags |= MCR_TRYHARD;

	m = mcache_alloc(m_cache(MC_MBUF), mcflags);
	if (m != NULL) {
		MBUF_INIT(m, hdr, type);
		mtype_stat_inc(type);
		mtype_stat_dec(MT_FREE);
#if CONFIG_MACF_NET
		if (hdr && mac_init_mbuf(m, wait) != 0) {
			m_free(m);
			return (NULL);
		}
#endif /* MAC_NET */
	}
	return (m);
}

/*
 * Space allocation routines; these are also available as macros
 * for critical paths.
 */
#define	_M_GET(wait, type)	m_get_common(wait, type, 0)
#define	_M_GETHDR(wait, type)	m_get_common(wait, type, 1)
#define	_M_RETRY(wait, type)	_M_GET(wait, type)
#define	_M_RETRYHDR(wait, type)	_M_GETHDR(wait, type)
#define	_MGET(m, how, type)	((m) = _M_GET(how, type))
#define	_MGETHDR(m, how, type)	((m) = _M_GETHDR(how, type))
struct mbuf *
m_get(int wait, int type)
{
	return (_M_GET(wait, type));
}

struct mbuf *
m_gethdr(int wait, int type)
{
	return (_M_GETHDR(wait, type));
}

struct mbuf *
m_retry(int wait, int type)
{
	return (_M_RETRY(wait, type));
}

struct mbuf *
m_retryhdr(int wait, int type)
{
	return (_M_RETRYHDR(wait, type));
}

struct mbuf *
m_getclr(int wait, int type)
{
	struct mbuf *m;

	_MGET(m, wait, type);
	if (m != NULL)
		bzero(MTOD(m, caddr_t), MLEN);
	return (m);
}
static boolean_t
m_free_paired(struct mbuf *m)
{
	VERIFY((m->m_flags & M_EXT) && (MEXT_FLAGS(m) & EXTF_PAIRED));

	if (MEXT_PMBUF(m) == m) {
		volatile UInt16 *addr = (volatile UInt16 *)&MEXT_PREF(m);
		int16_t oprefcnt, prefcnt;

		/*
		 * Paired ref count might be negative in case we lose
		 * against another thread clearing MEXT_PMBUF, in the
		 * event it occurs after the above memory barrier sync.
		 * In that case just ignore as things have been unpaired.
		 */
		do {
			oprefcnt = *addr;
			prefcnt = oprefcnt - 1;
		} while (!OSCompareAndSwap16(oprefcnt, prefcnt, addr));

		if (prefcnt > 1) {
			return (TRUE);
		} else if (prefcnt == 1) {
			(*(m_get_ext_free(m)))(m->m_ext.ext_buf,
			    m->m_ext.ext_size, m_get_ext_arg(m));
			return (TRUE);
		} else if (prefcnt == 0) {
			VERIFY(MBUF_IS_PAIRED(m));

			/*
			 * Restore minref to its natural value, so that
			 * the caller will be able to free the cluster
			 * as well.
			 */
			MEXT_MINREF(m) = 0;

			/*
			 * Clear MEXT_PMBUF, but leave EXTF_PAIRED intact
			 * as it is immutable. atomic_set_ptr also causes
			 * memory barrier sync.
			 */
			atomic_set_ptr(&MEXT_PMBUF(m), NULL);

			switch (m->m_ext.ext_size) {
			case MCLBYTES:
				m_set_ext(m, m_get_rfa(m), NULL, NULL);
				break;

			case MBIGCLBYTES:
				m_set_ext(m, m_get_rfa(m), m_bigfree, NULL);
				break;

			case M16KCLBYTES:
				m_set_ext(m, m_get_rfa(m), m_16kfree, NULL);
				break;

			default:
				VERIFY(0);
				/* NOTREACHED */
			}
		}
	}

	/*
	 * Tell caller the unpair has occurred, and that the reference
	 * count on the external cluster held for the paired mbuf should
	 * now be dropped.
	 */
	return (FALSE);
}
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n = m->m_next;

	if (m->m_type == MT_FREE)
		panic("m_free: freeing an already freed mbuf");

	if (m->m_flags & M_PKTHDR) {
		/* Check for scratch area overflow */
		m_redzone_verify(m);
		/* Free the aux data and tags if there is any */
		m_tag_delete_chain(m, NULL);

		m_do_tx_compl_callback(m, NULL);
	}

	if (m->m_flags & M_EXT) {
		u_int16_t refcnt;
		u_int32_t composite;
		m_ext_free_func_t m_free_func;

		if (MBUF_IS_PAIRED(m) && m_free_paired(m))
			return (n);

		refcnt = m_decref(m);
		composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
		m_free_func = m_get_ext_free(m);

		if (refcnt == MEXT_MINREF(m) && !composite) {
			if (m_free_func == NULL) {
				mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
			} else if (m_free_func == m_bigfree) {
				mcache_free(m_cache(MC_BIGCL),
				    m->m_ext.ext_buf);
			} else if (m_free_func == m_16kfree) {
				mcache_free(m_cache(MC_16KCL),
				    m->m_ext.ext_buf);
			} else {
				(*m_free_func)(m->m_ext.ext_buf,
				    m->m_ext.ext_size, m_get_ext_arg(m));
			}
			mcache_free(ref_cache, m_get_rfa(m));
			m_set_ext(m, NULL, NULL, NULL);
		} else if (refcnt == MEXT_MINREF(m) && composite) {
			VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED));
			VERIFY(m->m_type != MT_FREE);

			mtype_stat_dec(m->m_type);
			mtype_stat_inc(MT_FREE);

			m->m_type = MT_FREE;
			m->m_flags = M_EXT;
			m->m_len = 0;
			m->m_next = m->m_nextpkt = NULL;

			MEXT_FLAGS(m) &= ~EXTF_READONLY;

			/* "Free" into the intermediate cache */
			if (m_free_func == NULL) {
				mcache_free(m_cache(MC_MBUF_CL), m);
			} else if (m_free_func == m_bigfree) {
				mcache_free(m_cache(MC_MBUF_BIGCL), m);
			} else {
				VERIFY(m_free_func == m_16kfree);
				mcache_free(m_cache(MC_MBUF_16KCL), m);
			}
			return (n);
		}
	}

	if (m->m_type != MT_FREE) {
		mtype_stat_dec(m->m_type);
		mtype_stat_inc(MT_FREE);
	}

	m->m_type = MT_FREE;
	m->m_flags = m->m_len = 0;
	m->m_next = m->m_nextpkt = NULL;

	mcache_free(m_cache(MC_MBUF), m);

	return (n);
}
__private_extern__ struct mbuf *
m_clattach(struct mbuf *m, int type, caddr_t extbuf,
    void (*extfree)(caddr_t, u_int, caddr_t), u_int extsize, caddr_t extarg,
    int wait, int pair)
{
	struct ext_ref *rfa = NULL;

	/*
	 * If pairing is requested and an existing mbuf is provided, reject
	 * it if it's already been paired to another cluster. Otherwise,
	 * allocate a new one or free any existing below.
	 */
	if ((m != NULL && MBUF_IS_PAIRED(m)) ||
	    (m == NULL && (m = _M_GETHDR(wait, type)) == NULL))
		return (NULL);

	if (m->m_flags & M_EXT) {
		u_int16_t refcnt;
		u_int32_t composite;
		m_ext_free_func_t m_free_func;

		refcnt = m_decref(m);
		composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
		VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED) && MEXT_PMBUF(m) == NULL);
		m_free_func = m_get_ext_free(m);
		if (refcnt == MEXT_MINREF(m) && !composite) {
			if (m_free_func == NULL) {
				mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
			} else if (m_free_func == m_bigfree) {
				mcache_free(m_cache(MC_BIGCL),
				    m->m_ext.ext_buf);
			} else if (m_free_func == m_16kfree) {
				mcache_free(m_cache(MC_16KCL),
				    m->m_ext.ext_buf);
			} else {
				(*m_free_func)(m->m_ext.ext_buf,
				    m->m_ext.ext_size, m_get_ext_arg(m));
			}
			/* Re-use the reference structure */
			rfa = m_get_rfa(m);
		} else if (refcnt == MEXT_MINREF(m) && composite) {
			VERIFY(m->m_type != MT_FREE);

			mtype_stat_dec(m->m_type);
			mtype_stat_inc(MT_FREE);

			m->m_type = MT_FREE;
			m->m_flags = M_EXT;
			m->m_len = 0;
			m->m_next = m->m_nextpkt = NULL;

			MEXT_FLAGS(m) &= ~EXTF_READONLY;

			/* "Free" into the intermediate cache */
			if (m_free_func == NULL) {
				mcache_free(m_cache(MC_MBUF_CL), m);
			} else if (m_free_func == m_bigfree) {
				mcache_free(m_cache(MC_MBUF_BIGCL), m);
			} else {
				VERIFY(m_free_func == m_16kfree);
				mcache_free(m_cache(MC_MBUF_16KCL), m);
			}
			/*
			 * Allocate a new mbuf, since we didn't divorce
			 * the composite mbuf + cluster pair above.
			 */
			if ((m = _M_GETHDR(wait, type)) == NULL)
				return (NULL);
		}
	}

	if (rfa == NULL &&
	    (rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
		m_free(m);
		return (NULL);
	}

	if (!pair) {
		MEXT_INIT(m, extbuf, extsize, extfree, extarg, rfa,
		    0, 1, 0, 0, 0, NULL);
	} else {
		MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
		    1, 1, 1, EXTF_PAIRED, 0, m);
	}

	return (m);
}
/*
 * Perform `fast' allocation mbuf clusters from a cache of recently-freed
 * clusters. (If the cache is empty, new clusters are allocated en-masse.)
 */
struct mbuf *
m_getcl(int wait, int type, int flags)
{
	struct mbuf *m;
	int mcflags = MSLEEPF(wait);
	int hdr = (flags & M_PKTHDR);

	/* Is this due to a non-blocking retry? If so, then try harder */
	if (mcflags & MCR_NOSLEEP)
		mcflags |= MCR_TRYHARD;

	m = mcache_alloc(m_cache(MC_MBUF_CL), mcflags);
	if (m != NULL) {
		u_int16_t flag;
		struct ext_ref *rfa;
		void *cl;

		VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
		cl = m->m_ext.ext_buf;
		rfa = m_get_rfa(m);

		ASSERT(cl != NULL && rfa != NULL);
		VERIFY(MBUF_IS_COMPOSITE(m) && m_get_ext_free(m) == NULL);

		flag = MEXT_FLAGS(m);

		MBUF_INIT(m, hdr, type);
		MBUF_CL_INIT(m, cl, rfa, 1, flag);

		mtype_stat_inc(type);
		mtype_stat_dec(MT_FREE);
#if CONFIG_MACF_NET
		if (hdr && mac_init_mbuf(m, wait) != 0) {
			m_freem(m);
			return (NULL);
		}
#endif /* MAC_NET */
	}
	return (m);
}
/* m_mclget() add an mbuf cluster to a normal mbuf */
struct mbuf *
m_mclget(struct mbuf *m, int wait)
{
	struct ext_ref *rfa;

	if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL)
		return (m);

	m->m_ext.ext_buf = m_mclalloc(wait);
	if (m->m_ext.ext_buf != NULL) {
		MBUF_CL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
	} else {
		mcache_free(ref_cache, rfa);
	}
	return (m);
}

/* Allocate an mbuf cluster */
caddr_t
m_mclalloc(int wait)
{
	int mcflags = MSLEEPF(wait);

	/* Is this due to a non-blocking retry? If so, then try harder */
	if (mcflags & MCR_NOSLEEP)
		mcflags |= MCR_TRYHARD;

	return (mcache_alloc(m_cache(MC_CL), mcflags));
}

/* Free an mbuf cluster */
void
m_mclfree(caddr_t p)
{
	mcache_free(m_cache(MC_CL), p);
}
/*
 * mcl_hasreference() checks if a cluster of an mbuf is referenced by
 * another mbuf; see comments in m_incref() regarding EXTF_READONLY.
 */
int
m_mclhasreference(struct mbuf *m)
{
	if (!(m->m_flags & M_EXT))
		return (0);

	ASSERT(m_get_rfa(m) != NULL);

	return ((MEXT_FLAGS(m) & EXTF_READONLY) ? 1 : 0);
}

__private_extern__ caddr_t
m_bigalloc(int wait)
{
	int mcflags = MSLEEPF(wait);

	/* Is this due to a non-blocking retry? If so, then try harder */
	if (mcflags & MCR_NOSLEEP)
		mcflags |= MCR_TRYHARD;

	return (mcache_alloc(m_cache(MC_BIGCL), mcflags));
}

__private_extern__ void
m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
{
	mcache_free(m_cache(MC_BIGCL), p);
}
/* m_mbigget() add a 4KB mbuf cluster to a normal mbuf */
__private_extern__ struct mbuf *
m_mbigget(struct mbuf *m, int wait)
{
	struct ext_ref *rfa;

	if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL)
		return (m);

	m->m_ext.ext_buf = m_bigalloc(wait);
	if (m->m_ext.ext_buf != NULL) {
		MBUF_BIGCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
	} else {
		mcache_free(ref_cache, rfa);
	}
	return (m);
}

__private_extern__ caddr_t
m_16kalloc(int wait)
{
	int mcflags = MSLEEPF(wait);

	/* Is this due to a non-blocking retry? If so, then try harder */
	if (mcflags & MCR_NOSLEEP)
		mcflags |= MCR_TRYHARD;

	return (mcache_alloc(m_cache(MC_16KCL), mcflags));
}

__private_extern__ void
m_16kfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
{
	mcache_free(m_cache(MC_16KCL), p);
}
/* m_m16kget() add a 16KB mbuf cluster to a normal mbuf */
__private_extern__ struct mbuf *
m_m16kget(struct mbuf *m, int wait)
{
	struct ext_ref *rfa;

	if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL)
		return (m);

	m->m_ext.ext_buf = m_16kalloc(wait);
	if (m->m_ext.ext_buf != NULL) {
		MBUF_16KCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
	} else {
		mcache_free(ref_cache, rfa);
	}
	return (m);
}
/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
{
	VERIFY(from->m_flags & M_PKTHDR);

	/* Check for scratch area overflow */
	m_redzone_verify(from);

	if (to->m_flags & M_PKTHDR) {
		/* Check for scratch area overflow */
		m_redzone_verify(to);
		/* We will be taking over the tags of 'to' */
		m_tag_delete_chain(to, NULL);
	}
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	m_classifier_init(from, 0);		/* purge classifier info */
	m_tag_init(from, 1);			/* purge all tags from src */
	m_scratch_init(from);			/* clear src scratch area */
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	m_redzone_init(to);			/* setup red zone on dst */
}
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
static int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{
	VERIFY(from->m_flags & M_PKTHDR);

	/* Check for scratch area overflow */
	m_redzone_verify(from);

	if (to->m_flags & M_PKTHDR) {
		/* Check for scratch area overflow */
		m_redzone_verify(to);
		/* We will be taking over the tags of 'to' */
		m_tag_delete_chain(to, NULL);
	}
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	m_redzone_init(to);			/* setup red zone on dst */
	m_tag_init(to, 0);			/* preserve dst static tags */
	return (m_tag_copy_chain(to, from, how));
}
void
m_copy_pftag(struct mbuf *to, struct mbuf *from)
{
	memcpy(m_pftag(to), m_pftag(from), sizeof(struct pf_mtag));
	m_pftag(to)->pftag_hdr = NULL;
	m_pftag(to)->pftag_flags &= ~(PF_TAG_HDR_INET|PF_TAG_HDR_INET6);
}
void
m_classifier_init(struct mbuf *m, uint32_t pktf_mask)
{
	VERIFY(m->m_flags & M_PKTHDR);

	m->m_pkthdr.pkt_proto = 0;
	m->m_pkthdr.pkt_flowsrc = 0;
	m->m_pkthdr.pkt_flowid = 0;
	m->m_pkthdr.pkt_flags &= pktf_mask;	/* caller-defined mask */
	/* preserve service class and interface info for loopback packets */
	if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP))
		(void) m_set_service_class(m, MBUF_SC_BE);
	if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO))
		m->m_pkthdr.pkt_ifainfo = 0;
#if MEASURE_BW
	m->m_pkthdr.pkt_bwseq = 0;
#endif /* MEASURE_BW */
	m->m_pkthdr.pkt_timestamp = 0;
}
void
m_copy_classifier(struct mbuf *to, struct mbuf *from)
{
	VERIFY(to->m_flags & M_PKTHDR);
	VERIFY(from->m_flags & M_PKTHDR);

	to->m_pkthdr.pkt_proto = from->m_pkthdr.pkt_proto;
	to->m_pkthdr.pkt_flowsrc = from->m_pkthdr.pkt_flowsrc;
	to->m_pkthdr.pkt_flowid = from->m_pkthdr.pkt_flowid;
	to->m_pkthdr.pkt_flags = from->m_pkthdr.pkt_flags;
	(void) m_set_service_class(to, from->m_pkthdr.pkt_svc);
	to->m_pkthdr.pkt_ifainfo = from->m_pkthdr.pkt_ifainfo;
#if MEASURE_BW
	to->m_pkthdr.pkt_bwseq = from->m_pkthdr.pkt_bwseq;
#endif /* MEASURE_BW */
}
3989 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
3990 * if wantall is not set, return whatever number were available. Set up the
3991 * first num_with_pkthdrs with mbuf hdrs configured as packet headers; these
3992 * are chained on the m_nextpkt field. Any packets requested beyond this
3993 * are chained onto the last packet header's m_next field. The size of
3994 * the cluster is controlled by the parameter bufsize.
3996 __private_extern__
struct mbuf
*
3997 m_getpackets_internal(unsigned int *num_needed
, int num_with_pkthdrs
,
3998 int wait
, int wantall
, size_t bufsize
)
4001 struct mbuf
**np
, *top
;
4002 unsigned int pnum
, needed
= *num_needed
;
4003 mcache_obj_t
*mp_list
= NULL
;
4004 int mcflags
= MSLEEPF(wait
);
4006 struct ext_ref
*rfa
;
4010 ASSERT(bufsize
== m_maxsize(MC_CL
) ||
4011 bufsize
== m_maxsize(MC_BIGCL
) ||
4012 bufsize
== m_maxsize(MC_16KCL
));
4015 * Caller must first check for njcl because this
4016 * routine is internal and not exposed/used via KPI.
4018 VERIFY(bufsize
!= m_maxsize(MC_16KCL
) || njcl
> 0);
4025 * The caller doesn't want all the requested buffers; only some.
4026 * Try hard to get what we can, but don't block. This effectively
4027 * overrides MCR_SLEEP, since this thread will not go to sleep
4028 * if we can't get all the buffers.
4030 if (!wantall
|| (mcflags
& MCR_NOSLEEP
))
4031 mcflags
|= MCR_TRYHARD
;
4033 /* Allocate the composite mbuf + cluster elements from the cache */
4034 if (bufsize
== m_maxsize(MC_CL
))
4035 cp
= m_cache(MC_MBUF_CL
);
4036 else if (bufsize
== m_maxsize(MC_BIGCL
))
4037 cp
= m_cache(MC_MBUF_BIGCL
);
4039 cp
= m_cache(MC_MBUF_16KCL
);
4040 needed
= mcache_alloc_ext(cp
, &mp_list
, needed
, mcflags
);
4042 for (pnum
= 0; pnum
< needed
; pnum
++) {
4043 m
= (struct mbuf
*)mp_list
;
4044 mp_list
= mp_list
->obj_next
;
4046 VERIFY(m
->m_type
== MT_FREE
&& m
->m_flags
== M_EXT
);
4047 cl
= m
->m_ext
.ext_buf
;
4050 ASSERT(cl
!= NULL
&& rfa
!= NULL
);
4051 VERIFY(MBUF_IS_COMPOSITE(m
));
4053 flag
= MEXT_FLAGS(m
);
4055 MBUF_INIT(m
, num_with_pkthdrs
, MT_DATA
);
4056 if (bufsize
== m_maxsize(MC_16KCL
)) {
4057 MBUF_16KCL_INIT(m
, cl
, rfa
, 1, flag
);
4058 } else if (bufsize
== m_maxsize(MC_BIGCL
)) {
4059 MBUF_BIGCL_INIT(m
, cl
, rfa
, 1, flag
);
4061 MBUF_CL_INIT(m
, cl
, rfa
, 1, flag
);
4064 if (num_with_pkthdrs
> 0) {
4067 if (mac_mbuf_label_init(m
, wait
) != 0) {
4071 #endif /* MAC_NET */
4075 if (num_with_pkthdrs
> 0)
4080 ASSERT(pnum
!= *num_needed
|| mp_list
== NULL
);
4081 if (mp_list
!= NULL
)
4082 mcache_free_ext(cp
, mp_list
);
4085 mtype_stat_add(MT_DATA
, pnum
);
4086 mtype_stat_sub(MT_FREE
, pnum
);
4089 if (wantall
&& (pnum
!= *num_needed
)) {
4095 if (pnum
> *num_needed
) {
4096 printf("%s: File a radar related to <rdar://10146739>. \
4097 needed = %u, pnum = %u, num_needed = %u \n",
4098 __func__
, needed
, pnum
, *num_needed
);
/*
 * Return a list of mbufs linked by m_nextpkt. Try for numlist, and if
 * wantall is not set, return whatever number were available. The size of
 * each mbuf in the list is controlled by the parameter packetlen. Each
 * mbuf of the list may have a chain of mbufs linked by m_next. Each mbuf
 * in the chain is called a segment. If maxsegments is not null and the
 * value pointed to is not null, this specifies the maximum number of segments
 * for a chain of mbufs. If maxsegments is zero or the value pointed to
 * is zero, the caller does not have any restriction on the number of segments.
 * The actual number of segments of an mbuf chain is returned in the value
 * pointed to by maxsegments.
 */
4117 __private_extern__
struct mbuf
*
4118 m_allocpacket_internal(unsigned int *numlist
, size_t packetlen
,
4119 unsigned int *maxsegments
, int wait
, int wantall
, size_t wantsize
)
4121 struct mbuf
**np
, *top
, *first
= NULL
;
4122 size_t bufsize
, r_bufsize
;
4123 unsigned int num
= 0;
4124 unsigned int nsegs
= 0;
4125 unsigned int needed
, resid
;
4126 int mcflags
= MSLEEPF(wait
);
4127 mcache_obj_t
*mp_list
= NULL
, *rmp_list
= NULL
;
4128 mcache_t
*cp
= NULL
, *rcp
= NULL
;
4136 if (wantsize
== 0) {
4137 if (packetlen
<= MINCLSIZE
) {
4138 bufsize
= packetlen
;
4139 } else if (packetlen
> m_maxsize(MC_CL
)) {
4140 /* Use 4KB if jumbo cluster pool isn't available */
4141 if (packetlen
<= m_maxsize(MC_BIGCL
) || njcl
== 0)
4142 bufsize
= m_maxsize(MC_BIGCL
);
4144 bufsize
= m_maxsize(MC_16KCL
);
4146 bufsize
= m_maxsize(MC_CL
);
4148 } else if (wantsize
== m_maxsize(MC_CL
) ||
4149 wantsize
== m_maxsize(MC_BIGCL
) ||
4150 (wantsize
== m_maxsize(MC_16KCL
) && njcl
> 0)) {
4156 if (bufsize
<= MHLEN
) {
4158 } else if (bufsize
<= MINCLSIZE
) {
4159 if (maxsegments
!= NULL
&& *maxsegments
== 1) {
4160 bufsize
= m_maxsize(MC_CL
);
4165 } else if (bufsize
== m_maxsize(MC_16KCL
)) {
4167 nsegs
= ((packetlen
- 1) >> M16KCLSHIFT
) + 1;
4168 } else if (bufsize
== m_maxsize(MC_BIGCL
)) {
4169 nsegs
= ((packetlen
- 1) >> MBIGCLSHIFT
) + 1;
4171 nsegs
= ((packetlen
- 1) >> MCLSHIFT
) + 1;
4173 if (maxsegments
!= NULL
) {
4174 if (*maxsegments
&& nsegs
> *maxsegments
) {
4175 *maxsegments
= nsegs
;
4178 *maxsegments
= nsegs
;
4182 * The caller doesn't want all the requested buffers; only some.
4183 * Try hard to get what we can, but don't block. This effectively
4184 * overrides MCR_SLEEP, since this thread will not go to sleep
4185 * if we can't get all the buffers.
4187 if (!wantall
|| (mcflags
& MCR_NOSLEEP
))
4188 mcflags
|= MCR_TRYHARD
;
4191 * Simple case where all elements in the lists/chains are mbufs.
4192 * Unless bufsize is greater than MHLEN, each segment chain is made
4193 * up of exactly 1 mbuf. Otherwise, each segment chain is made up
4194 * of 2 mbufs; the second one is used for the residual data, i.e.
4195 * the remaining data that cannot fit into the first mbuf.
4197 if (bufsize
<= MINCLSIZE
) {
4198 /* Allocate the elements in one shot from the mbuf cache */
4199 ASSERT(bufsize
<= MHLEN
|| nsegs
== 2);
4200 cp
= m_cache(MC_MBUF
);
4201 needed
= mcache_alloc_ext(cp
, &mp_list
,
4202 (*numlist
) * nsegs
, mcflags
);
4205 * The number of elements must be even if we are to use an
4206 * mbuf (instead of a cluster) to store the residual data.
4207 * If we couldn't allocate the requested number of mbufs,
4208 * trim the number down (if it's odd) in order to avoid
4209 * creating a partial segment chain.
4211 if (bufsize
> MHLEN
&& (needed
& 0x1))
4214 while (num
< needed
) {
4217 m
= (struct mbuf
*)mp_list
;
4218 mp_list
= mp_list
->obj_next
;
4221 MBUF_INIT(m
, 1, MT_DATA
);
4223 if (mac_init_mbuf(m
, wait
) != 0) {
4227 #endif /* MAC_NET */
4229 if (bufsize
> MHLEN
) {
4230 /* A second mbuf for this segment chain */
4231 m
->m_next
= (struct mbuf
*)mp_list
;
4232 mp_list
= mp_list
->obj_next
;
4233 ASSERT(m
->m_next
!= NULL
);
4235 MBUF_INIT(m
->m_next
, 0, MT_DATA
);
4241 ASSERT(num
!= *numlist
|| mp_list
== NULL
);
4244 mtype_stat_add(MT_DATA
, num
);
4245 mtype_stat_sub(MT_FREE
, num
);
4249 /* We've got them all; return to caller */
4250 if (num
== *numlist
)
4257 * Complex cases where elements are made up of one or more composite
4258 * mbufs + cluster, depending on packetlen. Each N-segment chain can
4259 * be illustrated as follows:
4261 * [mbuf + cluster 1] [mbuf + cluster 2] ... [mbuf + cluster N]
4263 * Every composite mbuf + cluster element comes from the intermediate
4264 * cache (either MC_MBUF_CL or MC_MBUF_BIGCL). For space efficiency,
4265 * the last composite element will come from the MC_MBUF_CL cache,
4266 * unless the residual data is larger than 2KB where we use the
4267 * big cluster composite cache (MC_MBUF_BIGCL) instead. Residual
4268 * data is defined as extra data beyond the first element that cannot
4269 * fit into the previous element, i.e. there is no residual data if
4270 * the chain only has 1 segment.
4272 r_bufsize
= bufsize
;
4273 resid
= packetlen
> bufsize
? packetlen
% bufsize
: 0;
4275 /* There is residual data; figure out the cluster size */
4276 if (wantsize
== 0 && packetlen
> MINCLSIZE
) {
4278 * Caller didn't request that all of the segments
4279 * in the chain use the same cluster size; use the
4280 * smaller of the cluster sizes.
4282 if (njcl
> 0 && resid
> m_maxsize(MC_BIGCL
))
4283 r_bufsize
= m_maxsize(MC_16KCL
);
4284 else if (resid
> m_maxsize(MC_CL
))
4285 r_bufsize
= m_maxsize(MC_BIGCL
);
4287 r_bufsize
= m_maxsize(MC_CL
);
4289 /* Use the same cluster size as the other segments */
4297 * Attempt to allocate composite mbuf + cluster elements for
4298 * the residual data in each chain; record the number of such
4299 * elements that can be allocated so that we know how many
4300 * segment chains we can afford to create.
4302 if (r_bufsize
<= m_maxsize(MC_CL
))
4303 rcp
= m_cache(MC_MBUF_CL
);
4304 else if (r_bufsize
<= m_maxsize(MC_BIGCL
))
4305 rcp
= m_cache(MC_MBUF_BIGCL
);
4307 rcp
= m_cache(MC_MBUF_16KCL
);
4308 needed
= mcache_alloc_ext(rcp
, &rmp_list
, *numlist
, mcflags
);
4313 /* This is temporarily reduced for calculation */
4319 * Attempt to allocate the rest of the composite mbuf + cluster
4320 * elements for the number of segment chains that we need.
4322 if (bufsize
<= m_maxsize(MC_CL
))
4323 cp
= m_cache(MC_MBUF_CL
);
4324 else if (bufsize
<= m_maxsize(MC_BIGCL
))
4325 cp
= m_cache(MC_MBUF_BIGCL
);
4327 cp
= m_cache(MC_MBUF_16KCL
);
4328 needed
= mcache_alloc_ext(cp
, &mp_list
, needed
* nsegs
, mcflags
);
4330 /* Round it down to avoid creating a partial segment chain */
4331 needed
= (needed
/ nsegs
) * nsegs
;
4337 * We're about to construct the chain(s); take into account
4338 * the number of segments we have created above to hold the
4339 * residual data for each chain, as well as restore the
4340 * original count of segments per chain.
4343 needed
+= needed
/ nsegs
;
4350 struct ext_ref
*rfa
;
4353 m_ext_free_func_t m_free_func
;
4356 if (nsegs
== 1 || (num
% nsegs
) != 0 || resid
== 0) {
4357 m
= (struct mbuf
*)mp_list
;
4358 mp_list
= mp_list
->obj_next
;
4360 m
= (struct mbuf
*)rmp_list
;
4361 rmp_list
= rmp_list
->obj_next
;
4363 m_free_func
= m_get_ext_free(m
);
4365 VERIFY(m
->m_type
== MT_FREE
&& m
->m_flags
== M_EXT
);
4366 VERIFY(m_free_func
== NULL
|| m_free_func
== m_bigfree
||
4367 m_free_func
== m_16kfree
);
4369 cl
= m
->m_ext
.ext_buf
;
4372 ASSERT(cl
!= NULL
&& rfa
!= NULL
);
4373 VERIFY(MBUF_IS_COMPOSITE(m
));
4375 flag
= MEXT_FLAGS(m
);
4377 pkthdr
= (nsegs
== 1 || (num
% nsegs
) == 1);
4380 MBUF_INIT(m
, pkthdr
, MT_DATA
);
4381 if (m_free_func
== m_16kfree
) {
4382 MBUF_16KCL_INIT(m
, cl
, rfa
, 1, flag
);
4383 } else if (m_free_func
== m_bigfree
) {
4384 MBUF_BIGCL_INIT(m
, cl
, rfa
, 1, flag
);
4386 MBUF_CL_INIT(m
, cl
, rfa
, 1, flag
);
4389 if (pkthdr
&& mac_init_mbuf(m
, wait
) != 0) {
4394 #endif /* MAC_NET */
4397 if ((num
% nsegs
) == 0)
4398 np
= &first
->m_nextpkt
;
4407 mtype_stat_add(MT_DATA
, num
);
4408 mtype_stat_sub(MT_FREE
, num
);
4413 /* We've got them all; return to caller */
4414 if (num
== *numlist
) {
4415 ASSERT(mp_list
== NULL
&& rmp_list
== NULL
);
4420 /* Free up what's left of the above */
4421 if (mp_list
!= NULL
)
4422 mcache_free_ext(cp
, mp_list
);
4423 if (rmp_list
!= NULL
)
4424 mcache_free_ext(rcp
, rmp_list
);
4425 if (wantall
&& top
!= NULL
) {
/*
 * Best effort to get a mbuf cluster + pkthdr. Used by drivers to allocate
 * packets on the receive ring.
 */
__private_extern__ struct mbuf *
m_getpacket_how(int wait)
{
	unsigned int num_needed = 1;

	return (m_getpackets_internal(&num_needed, 1, wait, 1,
	    m_maxsize(MC_CL)));
}
/*
 * Best effort to get a mbuf cluster + pkthdr. Used by drivers to allocate
 * packets on the receive ring.
 */
struct mbuf *
m_getpacket(void)
{
	unsigned int num_needed = 1;

	return (m_getpackets_internal(&num_needed, 1, M_WAIT, 1,
	    m_maxsize(MC_CL)));
}
4460 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
4461 * if this can't be met, return whatever number were available. Set up the
4462 * first num_with_pkthdrs with mbuf hdrs configured as packet headers. These
4463 * are chained on the m_nextpkt field. Any packets requested beyond this are
4464 * chained onto the last packet header's m_next field.
struct mbuf *
m_getpackets(int num_needed, int num_with_pkthdrs, int how)
{
	unsigned int n = num_needed;

	return (m_getpackets_internal(&n, num_with_pkthdrs, how, 0,
	    m_maxsize(MC_CL)));
}
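/*
 * Example (sketch): a receive path might pre-populate its ring with a batch
 * of cluster-backed packets and walk the m_nextpkt links of the returned
 * list.  The batch size, loop, and hand-off step below are illustrative.
 *
 *	struct mbuf *list = m_getpackets(32, 32, M_DONTWAIT);
 *	while (list != NULL) {
 *		struct mbuf *pkt = list;
 *		list = list->m_nextpkt;
 *		pkt->m_nextpkt = NULL;
 *		(hand pkt to the DMA ring)
 *	}
 */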
4476 * Return a list of mbuf hdrs set up as packet hdrs chained together
4477 * on the m_nextpkt field
4480 m_getpackethdrs(int num_needed
, int how
)
4483 struct mbuf
**np
, *top
;
4488 while (num_needed
--) {
4489 m
= _M_RETRYHDR(how
, MT_DATA
);
 * Free an mbuf list (m_nextpkt) while following m_next. Returns the count
 * of packets freed. Used by the drivers.
4505 m_freem_list(struct mbuf
*m
)
4507 struct mbuf
*nextpkt
;
4508 mcache_obj_t
*mp_list
= NULL
;
4509 mcache_obj_t
*mcl_list
= NULL
;
4510 mcache_obj_t
*mbc_list
= NULL
;
4511 mcache_obj_t
*m16k_list
= NULL
;
4512 mcache_obj_t
*m_mcl_list
= NULL
;
4513 mcache_obj_t
*m_mbc_list
= NULL
;
4514 mcache_obj_t
*m_m16k_list
= NULL
;
4515 mcache_obj_t
*ref_list
= NULL
;
4517 int mt_free
= 0, mt_data
= 0, mt_header
= 0, mt_soname
= 0, mt_tag
= 0;
4522 nextpkt
= m
->m_nextpkt
;
4523 m
->m_nextpkt
= NULL
;
4526 struct mbuf
*next
= m
->m_next
;
4527 mcache_obj_t
*o
, *rfa
;
4528 u_int32_t composite
;
4530 m_ext_free_func_t m_free_func
;
4532 if (m
->m_type
== MT_FREE
)
4533 panic("m_free: freeing an already freed mbuf");
4535 if (m
->m_flags
& M_PKTHDR
) {
4536 /* Check for scratch area overflow */
4537 m_redzone_verify(m
);
4538 /* Free the aux data and tags if there is any */
4539 m_tag_delete_chain(m
, NULL
);
4542 if (!(m
->m_flags
& M_EXT
)) {
4547 if (MBUF_IS_PAIRED(m
) && m_free_paired(m
)) {
4554 o
= (mcache_obj_t
*)(void *)m
->m_ext
.ext_buf
;
4555 refcnt
= m_decref(m
);
4556 composite
= (MEXT_FLAGS(m
) & EXTF_COMPOSITE
);
4557 m_free_func
= m_get_ext_free(m
);
4558 if (refcnt
== MEXT_MINREF(m
) && !composite
) {
4559 if (m_free_func
== NULL
) {
4560 o
->obj_next
= mcl_list
;
4562 } else if (m_free_func
== m_bigfree
) {
4563 o
->obj_next
= mbc_list
;
4565 } else if (m_free_func
== m_16kfree
) {
4566 o
->obj_next
= m16k_list
;
4569 (*(m_free_func
))((caddr_t
)o
,
4573 rfa
= (mcache_obj_t
*)(void *)m_get_rfa(m
);
4574 rfa
->obj_next
= ref_list
;
4576 m_set_ext(m
, NULL
, NULL
, NULL
);
4577 } else if (refcnt
== MEXT_MINREF(m
) && composite
) {
4578 VERIFY(!(MEXT_FLAGS(m
) & EXTF_PAIRED
));
4579 VERIFY(m
->m_type
!= MT_FREE
);
4581 * Amortize the costs of atomic operations
4582 * by doing them at the end, if possible.
4584 if (m
->m_type
== MT_DATA
)
4586 else if (m
->m_type
== MT_HEADER
)
4588 else if (m
->m_type
== MT_SONAME
)
4590 else if (m
->m_type
== MT_TAG
)
4593 mtype_stat_dec(m
->m_type
);
4595 m
->m_type
= MT_FREE
;
4598 m
->m_next
= m
->m_nextpkt
= NULL
;
4600 MEXT_FLAGS(m
) &= ~EXTF_READONLY
;
4602 /* "Free" into the intermediate cache */
4603 o
= (mcache_obj_t
*)m
;
4604 if (m_free_func
== NULL
) {
4605 o
->obj_next
= m_mcl_list
;
4607 } else if (m_free_func
== m_bigfree
) {
4608 o
->obj_next
= m_mbc_list
;
4611 VERIFY(m_free_func
== m_16kfree
);
4612 o
->obj_next
= m_m16k_list
;
4620 * Amortize the costs of atomic operations
4621 * by doing them at the end, if possible.
4623 if (m
->m_type
== MT_DATA
)
4625 else if (m
->m_type
== MT_HEADER
)
4627 else if (m
->m_type
== MT_SONAME
)
4629 else if (m
->m_type
== MT_TAG
)
4631 else if (m
->m_type
!= MT_FREE
)
4632 mtype_stat_dec(m
->m_type
);
4634 m
->m_type
= MT_FREE
;
4635 m
->m_flags
= m
->m_len
= 0;
4636 m
->m_next
= m
->m_nextpkt
= NULL
;
4638 ((mcache_obj_t
*)m
)->obj_next
= mp_list
;
4639 mp_list
= (mcache_obj_t
*)m
;
4648 mtype_stat_add(MT_FREE
, mt_free
);
4650 mtype_stat_sub(MT_DATA
, mt_data
);
4652 mtype_stat_sub(MT_HEADER
, mt_header
);
4654 mtype_stat_sub(MT_SONAME
, mt_soname
);
4656 mtype_stat_sub(MT_TAG
, mt_tag
);
4658 if (mp_list
!= NULL
)
4659 mcache_free_ext(m_cache(MC_MBUF
), mp_list
);
4660 if (mcl_list
!= NULL
)
4661 mcache_free_ext(m_cache(MC_CL
), mcl_list
);
4662 if (mbc_list
!= NULL
)
4663 mcache_free_ext(m_cache(MC_BIGCL
), mbc_list
);
4664 if (m16k_list
!= NULL
)
4665 mcache_free_ext(m_cache(MC_16KCL
), m16k_list
);
4666 if (m_mcl_list
!= NULL
)
4667 mcache_free_ext(m_cache(MC_MBUF_CL
), m_mcl_list
);
4668 if (m_mbc_list
!= NULL
)
4669 mcache_free_ext(m_cache(MC_MBUF_BIGCL
), m_mbc_list
);
4670 if (m_m16k_list
!= NULL
)
4671 mcache_free_ext(m_cache(MC_MBUF_16KCL
), m_m16k_list
);
4672 if (ref_list
!= NULL
)
4673 mcache_free_ext(ref_cache
, ref_list
);
4679 m_freem(struct mbuf
*m
)
4686 * Mbuffer utility routines.
/*
 * Compute the amount of space available before the current start
 * of data in an mbuf.
 */
int
m_leadingspace(struct mbuf *m)
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return (0);
		return (m->m_data - m->m_ext.ext_buf);
	}
	if (m->m_flags & M_PKTHDR)
		return (m->m_data - m->m_pktdat);
	return (m->m_data - m->m_dat);
}

/*
 * Compute the amount of space available after the end of data in an mbuf.
 */
int
m_trailingspace(struct mbuf *m)
{
	if (m->m_flags & M_EXT) {
		if (MCLHASREFERENCE(m))
			return (0);
		return (m->m_ext.ext_buf + m->m_ext.ext_size -
		    (m->m_data + m->m_len));
	}
	return (&m->m_dat[MLEN] - (m->m_data + m->m_len));
}
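/*
 * Example (sketch): before copying data into an existing mbuf, a caller can
 * check how many bytes fit past the current end of data; the helper returns
 * zero for referenced clusters, so this also guards against scribbling on
 * shared storage.  The source buffer and length below are illustrative.
 *
 *	if (m_trailingspace(m) >= len) {
 *		bcopy(src, mtod(m, caddr_t) + m->m_len, len);
 *		m->m_len += len;
 *	}
 */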
4722 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain,
4723 * copy junk along. Does not adjust packet header length.
4726 m_prepend(struct mbuf
*m
, int len
, int how
)
4730 _MGET(mn
, how
, m
->m_type
);
4735 if (m
->m_flags
& M_PKTHDR
) {
4736 M_COPY_PKTHDR(mn
, m
);
4737 m
->m_flags
&= ~M_PKTHDR
;
4741 if (m
->m_flags
& M_PKTHDR
) {
4742 VERIFY(len
<= MHLEN
);
4745 VERIFY(len
<= MLEN
);
4753 * Replacement for old M_PREPEND macro: allocate new mbuf to prepend to
4754 * chain, copy junk along, and adjust length.
4757 m_prepend_2(struct mbuf
*m
, int len
, int how
, int align
)
4759 if (M_LEADINGSPACE(m
) >= len
&&
4760 (!align
|| IS_P2ALIGNED((m
->m_data
- len
), sizeof(u_int32_t
)))) {
4764 m
= m_prepend(m
, len
, how
);
4766 if ((m
) && (m
->m_flags
& M_PKTHDR
))
4767 m
->m_pkthdr
.len
+= len
;
4772 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
4773 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
4774 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
4779 m_copym_mode(struct mbuf
*m
, int off0
, int len
, int wait
, uint32_t mode
)
4781 struct mbuf
*n
, *mhdr
= NULL
, **np
;
4786 if (off
< 0 || len
< 0)
4787 panic("m_copym: invalid offset %d or len %d", off
, len
);
4789 VERIFY((mode
!= M_COPYM_MUST_COPY_HDR
&&
4790 mode
!= M_COPYM_MUST_MOVE_HDR
) || (m
->m_flags
& M_PKTHDR
));
4792 if ((off
== 0 && (m
->m_flags
& M_PKTHDR
)) ||
4793 mode
== M_COPYM_MUST_COPY_HDR
|| mode
== M_COPYM_MUST_MOVE_HDR
) {
4798 while (off
>= m
->m_len
) {
4799 if (m
->m_next
== NULL
)
4800 panic("m_copym: invalid mbuf chain");
4809 if (len
!= M_COPYALL
)
4810 panic("m_copym: len != M_COPYALL");
4815 n
= _M_RETRYHDR(wait
, m
->m_type
);
4817 n
= _M_RETRY(wait
, m
->m_type
);
4824 if ((mode
== M_COPYM_MOVE_HDR
) ||
4825 (mode
== M_COPYM_MUST_MOVE_HDR
)) {
4826 M_COPY_PKTHDR(n
, mhdr
);
4827 } else if ((mode
== M_COPYM_COPY_HDR
) ||
4828 (mode
== M_COPYM_MUST_COPY_HDR
)) {
4829 if (m_dup_pkthdr(n
, mhdr
, wait
) == 0)
4832 if (len
== M_COPYALL
)
4833 n
->m_pkthdr
.len
-= off0
;
4835 n
->m_pkthdr
.len
= len
;
4838 * There is data to copy from the packet header mbuf
4839 * if it is empty or it is before the starting offset
4846 n
->m_len
= MIN(len
, (m
->m_len
- off
));
4847 if (m
->m_flags
& M_EXT
) {
4848 n
->m_ext
= m
->m_ext
;
4850 n
->m_data
= m
->m_data
+ off
;
4851 n
->m_flags
|= M_EXT
;
4854 * Limit to the capacity of the destination
4856 if (n
->m_flags
& M_PKTHDR
)
4857 n
->m_len
= MIN(n
->m_len
, MHLEN
);
4859 n
->m_len
= MIN(n
->m_len
, MLEN
);
4861 if (MTOD(n
, char *) + n
->m_len
> ((char *)n
) + MSIZE
)
4862 panic("%s n %p copy overflow",
4865 bcopy(MTOD(m
, caddr_t
)+off
, MTOD(n
, caddr_t
),
4866 (unsigned)n
->m_len
);
4868 if (len
!= M_COPYALL
)
4888 m_copym(struct mbuf
*m
, int off0
, int len
, int wait
)
4890 return (m_copym_mode(m
, off0
, len
, wait
, M_COPYM_MOVE_HDR
));
 * Equivalent to m_copym except that all necessary mbuf hdrs are allocated
 * within this routine; also, the last mbuf and offset accessed are passed
 * out and can be passed back in to avoid having to rescan the entire mbuf
 * list (normally hung off of the socket).
4900 m_copym_with_hdrs(struct mbuf
*m0
, int off0
, int len0
, int wait
,
4901 struct mbuf
**m_lastm
, int *m_off
, uint32_t mode
)
4903 struct mbuf
*m
= m0
, *n
, **np
= NULL
;
4904 int off
= off0
, len
= len0
;
4905 struct mbuf
*top
= NULL
;
4906 int mcflags
= MSLEEPF(wait
);
4909 mcache_obj_t
*list
= NULL
;
4912 if (off
== 0 && (m
->m_flags
& M_PKTHDR
))
4915 if (m_lastm
!= NULL
&& *m_lastm
!= NULL
) {
4919 while (off
>= m
->m_len
) {
4929 len
-= MIN(len
, (n
->m_len
- ((needed
== 1) ? off
: 0)));
4936 * If the caller doesn't want to be put to sleep, mark it with
4937 * MCR_TRYHARD so that we may reclaim buffers from other places
4940 if (mcflags
& MCR_NOSLEEP
)
4941 mcflags
|= MCR_TRYHARD
;
4943 if (mcache_alloc_ext(m_cache(MC_MBUF
), &list
, needed
,
4949 n
= (struct mbuf
*)list
;
4950 list
= list
->obj_next
;
4951 ASSERT(n
!= NULL
&& m
!= NULL
);
4953 type
= (top
== NULL
) ? MT_HEADER
: m
->m_type
;
4954 MBUF_INIT(n
, (top
== NULL
), type
);
4956 if (top
== NULL
&& mac_mbuf_label_init(n
, wait
) != 0) {
4957 mtype_stat_inc(MT_HEADER
);
4958 mtype_stat_dec(MT_FREE
);
4962 #endif /* MAC_NET */
4974 if ((mode
== M_COPYM_MOVE_HDR
) ||
4975 (mode
== M_COPYM_MUST_MOVE_HDR
)) {
4976 M_COPY_PKTHDR(n
, m
);
4977 } else if ((mode
== M_COPYM_COPY_HDR
) ||
4978 (mode
== M_COPYM_MUST_COPY_HDR
)) {
4979 if (m_dup_pkthdr(n
, m
, wait
) == 0)
4982 n
->m_pkthdr
.len
= len
;
4985 n
->m_len
= MIN(len
, (m
->m_len
- off
));
4987 if (m
->m_flags
& M_EXT
) {
4988 n
->m_ext
= m
->m_ext
;
4990 n
->m_data
= m
->m_data
+ off
;
4991 n
->m_flags
|= M_EXT
;
4993 if (MTOD(n
, char *) + n
->m_len
> ((char *)n
) + MSIZE
)
4994 panic("%s n %p copy overflow",
4997 bcopy(MTOD(m
, caddr_t
)+off
, MTOD(n
, caddr_t
),
4998 (unsigned)n
->m_len
);
5003 if (m_lastm
!= NULL
&& m_off
!= NULL
) {
5004 if ((off
+ n
->m_len
) == m
->m_len
) {
5005 *m_lastm
= m
->m_next
;
5009 *m_off
= off
+ n
->m_len
;
5019 mtype_stat_inc(MT_HEADER
);
5020 mtype_stat_add(type
, needed
);
5021 mtype_stat_sub(MT_FREE
, needed
+ 1);
5023 ASSERT(list
== NULL
);
5028 mcache_free_ext(m_cache(MC_MBUF
), list
);
5036 * Copy data from an mbuf chain starting "off" bytes from the beginning,
5037 * continuing for "len" bytes, into the indicated buffer.
5040 m_copydata(struct mbuf
*m
, int off
, int len
, void *vp
)
5045 if (off
< 0 || len
< 0)
5046 panic("m_copydata: invalid offset %d or len %d", off
, len
);
5050 panic("m_copydata: invalid mbuf chain");
5058 panic("m_copydata: invalid mbuf chain");
5059 count
= MIN(m
->m_len
- off
, len
);
5060 bcopy(MTOD(m
, caddr_t
) + off
, cp
, count
);
5069 * Concatenate mbuf chain n to m. Both chains must be of the same type
5070 * (e.g. MT_DATA). Any m_pkthdr is not updated.
5073 m_cat(struct mbuf
*m
, struct mbuf
*n
)
5078 if ((m
->m_flags
& M_EXT
) ||
5079 m
->m_data
+ m
->m_len
+ n
->m_len
>= &m
->m_dat
[MLEN
]) {
5080 /* just join the two chains */
5084 /* splat the data from one into the other */
5085 bcopy(MTOD(n
, caddr_t
), MTOD(m
, caddr_t
) + m
->m_len
,
5087 m
->m_len
+= n
->m_len
;
5093 m_adj(struct mbuf
*mp
, int req_len
)
5099 if ((m
= mp
) == NULL
)
5105 while (m
!= NULL
&& len
> 0) {
5106 if (m
->m_len
<= len
) {
5117 if (m
->m_flags
& M_PKTHDR
)
5118 m
->m_pkthdr
.len
-= (req_len
- len
);
5121 * Trim from tail. Scan the mbuf chain,
5122 * calculating its length and finding the last mbuf.
5123 * If the adjustment only affects this mbuf, then just
5124 * adjust and return. Otherwise, rescan and truncate
5125 * after the remaining size.
5131 if (m
->m_next
== (struct mbuf
*)0)
5135 if (m
->m_len
>= len
) {
5138 if (m
->m_flags
& M_PKTHDR
)
5139 m
->m_pkthdr
.len
-= len
;
5146 * Correct length for chain is "count".
5147 * Find the mbuf with last data, adjust its length,
5148 * and toss data from remaining mbufs on chain.
5151 if (m
->m_flags
& M_PKTHDR
)
5152 m
->m_pkthdr
.len
= count
;
5153 for (; m
; m
= m
->m_next
) {
5154 if (m
->m_len
>= count
) {
5160 while ((m
= m
->m_next
))
 * Rearrange an mbuf chain so that len bytes are contiguous
5167 * and in the data area of an mbuf (so that mtod and dtom
5168 * will work for a structure of size len). Returns the resulting
5169 * mbuf chain on success, frees it and returns null on failure.
5170 * If there is room, it will add up to max_protohdr-len extra bytes to the
5171 * contiguous region in an attempt to avoid being called next time.
5176 m_pullup(struct mbuf
*n
, int len
)
5183 * If first mbuf has no cluster, and has room for len bytes
5184 * without shifting current data, pullup into it,
5185 * otherwise allocate a new mbuf to prepend to the chain.
5187 if ((n
->m_flags
& M_EXT
) == 0 &&
5188 n
->m_data
+ len
< &n
->m_dat
[MLEN
] && n
->m_next
) {
5189 if (n
->m_len
>= len
)
5197 _MGET(m
, M_DONTWAIT
, n
->m_type
);
5201 if (n
->m_flags
& M_PKTHDR
) {
5202 M_COPY_PKTHDR(m
, n
);
5203 n
->m_flags
&= ~M_PKTHDR
;
5206 space
= &m
->m_dat
[MLEN
] - (m
->m_data
+ m
->m_len
);
5208 count
= MIN(MIN(MAX(len
, max_protohdr
), space
), n
->m_len
);
5209 bcopy(MTOD(n
, caddr_t
), MTOD(m
, caddr_t
) + m
->m_len
,
5219 } while (len
> 0 && n
);
5233 * Like m_pullup(), except a new mbuf is always allocated, and we allow
5234 * the amount of empty space before the data in the new mbuf to be specified
5235 * (in the event that the caller expects to prepend later).
5237 __private_extern__
int MSFail
= 0;
5239 __private_extern__
struct mbuf
*
5240 m_copyup(struct mbuf
*n
, int len
, int dstoff
)
5245 if (len
> (MHLEN
- dstoff
))
5247 MGET(m
, M_DONTWAIT
, n
->m_type
);
5251 if (n
->m_flags
& M_PKTHDR
) {
5252 m_copy_pkthdr(m
, n
);
5253 n
->m_flags
&= ~M_PKTHDR
;
5255 m
->m_data
+= dstoff
;
5256 space
= &m
->m_dat
[MLEN
] - (m
->m_data
+ m
->m_len
);
5258 count
= min(min(max(len
, max_protohdr
), space
), n
->m_len
);
5259 memcpy(mtod(m
, caddr_t
) + m
->m_len
, mtod(n
, caddr_t
),
5269 } while (len
> 0 && n
);
5283 * Partition an mbuf chain in two pieces, returning the tail --
5284 * all but the first len0 bytes. In case of failure, it returns NULL and
5285 * attempts to restore the chain to its original state.
5288 m_split(struct mbuf
*m0
, int len0
, int wait
)
5290 return (m_split0(m0
, len0
, wait
, 1));
5293 static struct mbuf
*
5294 m_split0(struct mbuf
*m0
, int len0
, int wait
, int copyhdr
)
5297 unsigned len
= len0
, remain
;
5299 for (m
= m0
; m
&& len
> m
->m_len
; m
= m
->m_next
)
5303 remain
= m
->m_len
- len
;
5304 if (copyhdr
&& (m0
->m_flags
& M_PKTHDR
)) {
5305 _MGETHDR(n
, wait
, m0
->m_type
);
5308 n
->m_pkthdr
.rcvif
= m0
->m_pkthdr
.rcvif
;
5309 n
->m_pkthdr
.len
= m0
->m_pkthdr
.len
- len0
;
5310 m0
->m_pkthdr
.len
= len0
;
5311 if (m
->m_flags
& M_EXT
)
5313 if (remain
> MHLEN
) {
5314 /* m can't be the lead packet */
5316 n
->m_next
= m_split(m
, len
, wait
);
5317 if (n
->m_next
== NULL
) {
5323 MH_ALIGN(n
, remain
);
5324 } else if (remain
== 0) {
5329 _MGET(n
, wait
, m
->m_type
);
5335 if (m
->m_flags
& M_EXT
) {
5336 n
->m_flags
|= M_EXT
;
5337 n
->m_ext
= m
->m_ext
;
5339 n
->m_data
= m
->m_data
+ len
;
5341 bcopy(MTOD(m
, caddr_t
) + len
, MTOD(n
, caddr_t
), remain
);
5345 n
->m_next
= m
->m_next
;
5351 * Routine to copy from device local memory into mbufs.
5354 m_devget(char *buf
, int totlen
, int off0
, struct ifnet
*ifp
,
5355 void (*copy
)(const void *, void *, size_t))
5358 struct mbuf
*top
= NULL
, **mp
= &top
;
5359 int off
= off0
, len
;
5367 * If 'off' is non-zero, packet is trailer-encapsulated,
5368 * so we have to skip the type and length fields.
5370 cp
+= off
+ 2 * sizeof (u_int16_t
);
5371 totlen
-= 2 * sizeof (u_int16_t
);
5373 _MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
5376 m
->m_pkthdr
.rcvif
= ifp
;
5377 m
->m_pkthdr
.len
= totlen
;
5380 while (totlen
> 0) {
5382 _MGET(m
, M_DONTWAIT
, MT_DATA
);
5389 len
= MIN(totlen
, epkt
- cp
);
5390 if (len
>= MINCLSIZE
) {
5391 MCLGET(m
, M_DONTWAIT
);
5392 if (m
->m_flags
& M_EXT
) {
5393 m
->m_len
= len
= MIN(len
, m_maxsize(MC_CL
));
5395 /* give up when it's out of cluster mbufs */
5403 * Place initial small packet/header at end of mbuf.
5405 if (len
< m
->m_len
) {
5407 len
+ max_linkhdr
<= m
->m_len
)
5408 m
->m_data
+= max_linkhdr
;
5415 copy(cp
, MTOD(m
, caddr_t
), (unsigned)len
);
5417 bcopy(cp
, MTOD(m
, caddr_t
), (unsigned)len
);
5428 #ifndef MBUF_GROWTH_NORMAL_THRESH
5429 #define MBUF_GROWTH_NORMAL_THRESH 25
5433 * Cluster freelist allocation check.
5436 m_howmany(int num
, size_t bufsize
)
5439 u_int32_t m_mbclusters
, m_clusters
, m_bigclusters
, m_16kclusters
;
5440 u_int32_t m_mbfree
, m_clfree
, m_bigclfree
, m_16kclfree
;
5441 u_int32_t sumclusters
, freeclusters
;
5442 u_int32_t percent_pool
, percent_kmem
;
5443 u_int32_t mb_growth
, mb_growth_thresh
;
5445 VERIFY(bufsize
== m_maxsize(MC_BIGCL
) ||
5446 bufsize
== m_maxsize(MC_16KCL
));
5448 lck_mtx_assert(mbuf_mlock
, LCK_MTX_ASSERT_OWNED
);
5450 /* Numbers in 2K cluster units */
5451 m_mbclusters
= m_total(MC_MBUF
) >> NMBPCLSHIFT
;
5452 m_clusters
= m_total(MC_CL
);
5453 m_bigclusters
= m_total(MC_BIGCL
) << NCLPBGSHIFT
;
5454 m_16kclusters
= m_total(MC_16KCL
);
5455 sumclusters
= m_mbclusters
+ m_clusters
+ m_bigclusters
;
5457 m_mbfree
= m_infree(MC_MBUF
) >> NMBPCLSHIFT
;
5458 m_clfree
= m_infree(MC_CL
);
5459 m_bigclfree
= m_infree(MC_BIGCL
) << NCLPBGSHIFT
;
5460 m_16kclfree
= m_infree(MC_16KCL
);
5461 freeclusters
= m_mbfree
+ m_clfree
+ m_bigclfree
;
5463 /* Bail if we've maxed out the mbuf memory map */
5464 if ((bufsize
== m_maxsize(MC_BIGCL
) && sumclusters
>= nclusters
) ||
5465 (njcl
> 0 && bufsize
== m_maxsize(MC_16KCL
) &&
5466 (m_16kclusters
<< NCLPJCLSHIFT
) >= njcl
)) {
5470 if (bufsize
== m_maxsize(MC_BIGCL
)) {
5472 if (m_bigclusters
< m_minlimit(MC_BIGCL
))
5473 return (m_minlimit(MC_BIGCL
) - m_bigclusters
);
5476 ((sumclusters
- freeclusters
) * 100) / sumclusters
;
5477 percent_kmem
= (sumclusters
* 100) / nclusters
;
5480 * If a light/normal user, grow conservatively (75%)
5481 * If a heavy user, grow aggressively (50%)
5483 if (percent_kmem
< MBUF_GROWTH_NORMAL_THRESH
)
5484 mb_growth
= MB_GROWTH_NORMAL
;
5486 mb_growth
= MB_GROWTH_AGGRESSIVE
;
5488 if (percent_kmem
< 5) {
5489 /* For initial allocations */
5492 /* Return if >= MBIGCL_LOWAT clusters available */
5493 if (m_infree(MC_BIGCL
) >= MBIGCL_LOWAT
&&
5494 m_total(MC_BIGCL
) >=
5495 MBIGCL_LOWAT
+ m_minlimit(MC_BIGCL
))
5498 /* Ensure at least num clusters are accessible */
5499 if (num
>= m_infree(MC_BIGCL
))
5500 i
= num
- m_infree(MC_BIGCL
);
5501 if (num
> m_total(MC_BIGCL
) - m_minlimit(MC_BIGCL
))
5502 j
= num
- (m_total(MC_BIGCL
) -
5503 m_minlimit(MC_BIGCL
));
5508 * Grow pool if percent_pool > 75 (normal growth)
5509 * or percent_pool > 50 (aggressive growth).
5511 mb_growth_thresh
= 100 - (100 / (1 << mb_growth
));
5512 if (percent_pool
> mb_growth_thresh
)
5513 j
= ((sumclusters
+ num
) >> mb_growth
) -
5518 /* Check to ensure we didn't go over limits */
5519 if (i
+ m_bigclusters
>= m_maxlimit(MC_BIGCL
))
5520 i
= m_maxlimit(MC_BIGCL
) - m_bigclusters
;
5521 if ((i
<< 1) + sumclusters
>= nclusters
)
5522 i
= (nclusters
- sumclusters
) >> 1;
5523 VERIFY((m_total(MC_BIGCL
) + i
) <= m_maxlimit(MC_BIGCL
));
5524 VERIFY(sumclusters
+ (i
<< 1) <= nclusters
);
5526 } else { /* 16K CL */
5528 /* Ensure at least num clusters are available */
5529 if (num
>= m_16kclfree
)
5530 i
= num
- m_16kclfree
;
5532 /* Always grow 16KCL pool aggressively */
5533 if (((m_16kclusters
+ num
) >> 1) > m_16kclfree
)
5534 j
= ((m_16kclusters
+ num
) >> 1) - m_16kclfree
;
5537 /* Check to ensure we don't go over limit */
5538 if (i
+ m_16kclusters
>= m_maxlimit(MC_16KCL
))
5539 i
= m_maxlimit(MC_16KCL
) - m_16kclusters
;
5540 VERIFY((m_total(MC_16KCL
) + i
) <= m_maxlimit(MC_16KCL
));
5545 * Return the number of bytes in the mbuf chain, m.
5548 m_length(struct mbuf
*m
)
5551 unsigned int pktlen
;
5553 if (m
->m_flags
& M_PKTHDR
)
5554 return (m
->m_pkthdr
.len
);
5557 for (m0
= m
; m0
!= NULL
; m0
= m0
->m_next
)
5558 pktlen
+= m0
->m_len
;
5563 * Copy data from a buffer back into the indicated mbuf chain,
5564 * starting "off" bytes from the beginning, extending the mbuf
5565 * chain if necessary.
5568 m_copyback(struct mbuf
*m0
, int off
, int len
, const void *cp
)
5571 struct mbuf
*origm
= m0
;
5581 m_copyback0(&m0
, off
, len
, cp
,
5582 M_COPYBACK0_COPYBACK
| M_COPYBACK0_EXTEND
, M_DONTWAIT
);
5585 if (error
!= 0 || (m0
!= NULL
&& origm
!= m0
))
5586 panic("m_copyback");
5591 m_copyback_cow(struct mbuf
*m0
, int off
, int len
, const void *cp
, int how
)
5595 /* don't support chain expansion */
5596 VERIFY(off
+ len
<= m_length(m0
));
5598 error
= m_copyback0(&m0
, off
, len
, cp
,
5599 M_COPYBACK0_COPYBACK
| M_COPYBACK0_COW
, how
);
5602 * no way to recover from partial success.
5603 * just free the chain.
 * m_makewritable: ensure the specified range is writable.
5615 m_makewritable(struct mbuf
**mp
, int off
, int len
, int how
)
5620 int origlen
, reslen
;
5622 origlen
= m_length(*mp
);
5625 #if 0 /* M_COPYALL is large enough */
5626 if (len
== M_COPYALL
)
5627 len
= m_length(*mp
) - off
; /* XXX */
5630 error
= m_copyback0(mp
, off
, len
, NULL
,
5631 M_COPYBACK0_PRESERVE
| M_COPYBACK0_COW
, how
);
5635 for (n
= *mp
; n
; n
= n
->m_next
)
5637 if (origlen
!= reslen
)
5638 panic("m_makewritable: length changed");
5639 if (((*mp
)->m_flags
& M_PKTHDR
) && reslen
!= (*mp
)->m_pkthdr
.len
)
5640 panic("m_makewritable: inconsist");
5647 m_copyback0(struct mbuf
**mp0
, int off
, int len
, const void *vp
, int flags
,
5654 const char *cp
= vp
;
5656 VERIFY(mp0
!= NULL
);
5657 VERIFY(*mp0
!= NULL
);
5658 VERIFY((flags
& M_COPYBACK0_PRESERVE
) == 0 || cp
== NULL
);
5659 VERIFY((flags
& M_COPYBACK0_COPYBACK
) == 0 || cp
!= NULL
);
5662 * we don't bother to update "totlen" in the case of M_COPYBACK0_COW,
5663 * assuming that M_COPYBACK0_EXTEND and M_COPYBACK0_COW are exclusive.
5666 VERIFY((~flags
& (M_COPYBACK0_EXTEND
|M_COPYBACK0_COW
)) != 0);
5670 while (off
> (mlen
= m
->m_len
)) {
5673 if (m
->m_next
== NULL
) {
5676 if (!(flags
& M_COPYBACK0_EXTEND
))
5680 * try to make some space at the end of "m".
5684 if (off
+ len
>= MINCLSIZE
&&
5685 !(m
->m_flags
& M_EXT
) && m
->m_len
== 0) {
5688 tspace
= M_TRAILINGSPACE(m
);
5690 tspace
= MIN(tspace
, off
+ len
);
5692 bzero(mtod(m
, char *) + m
->m_len
,
5701 * need to allocate an mbuf.
5704 if (off
+ len
>= MINCLSIZE
) {
5705 n
= m_getcl(how
, m
->m_type
, 0);
5707 n
= _M_GET(how
, m
->m_type
);
5713 n
->m_len
= MIN(M_TRAILINGSPACE(n
), off
+ len
);
5714 bzero(mtod(n
, char *), MIN(n
->m_len
, off
));
5721 mlen
= m
->m_len
- off
;
5722 if (mlen
!= 0 && m_mclhasreference(m
)) {
5727 * this mbuf is read-only.
5728 * allocate a new writable mbuf and try again.
5732 if (!(flags
& M_COPYBACK0_COW
))
5733 panic("m_copyback0: read-only");
5734 #endif /* DIAGNOSTIC */
5737 * if we're going to write into the middle of
5738 * a mbuf, split it first.
5740 if (off
> 0 && len
< mlen
) {
5741 n
= m_split0(m
, off
, how
, 0);
5752 * XXX TODO coalesce into the trailingspace of
5753 * the previous mbuf when possible.
5757 * allocate a new mbuf. copy packet header if needed.
5759 n
= _M_GET(how
, m
->m_type
);
5762 if (off
== 0 && (m
->m_flags
& M_PKTHDR
)) {
5763 M_COPY_PKTHDR(n
, m
);
5766 if (len
>= MINCLSIZE
)
5767 MCLGET(n
, M_DONTWAIT
);
5769 (n
->m_flags
& M_EXT
) ? MCLBYTES
: MLEN
;
5775 * free the region which has been overwritten.
5776 * copying data from old mbufs if requested.
5778 if (flags
& M_COPYBACK0_PRESERVE
)
5779 datap
= mtod(n
, char *);
5783 VERIFY(off
== 0 || eatlen
>= mlen
);
5785 VERIFY(len
>= mlen
);
5789 m_copydata(m
, off
, mlen
, datap
);
5796 while (m
!= NULL
&& m_mclhasreference(m
) &&
5797 n
->m_type
== m
->m_type
&& eatlen
> 0) {
5798 mlen
= MIN(eatlen
, m
->m_len
);
5800 m_copydata(m
, 0, mlen
, datap
);
5807 *mp
= m
= m_free(m
);
5815 mlen
= MIN(mlen
, len
);
5816 if (flags
& M_COPYBACK0_COPYBACK
) {
5817 bcopy(cp
, mtod(m
, caddr_t
) + off
, (unsigned)mlen
);
5826 if (m
->m_next
== NULL
) {
5833 if (((m
= *mp0
)->m_flags
& M_PKTHDR
) && (m
->m_pkthdr
.len
< totlen
)) {
5834 VERIFY(flags
& M_COPYBACK0_EXTEND
);
5835 m
->m_pkthdr
.len
= totlen
;
5845 mcl_to_paddr(char *addr
)
5847 vm_offset_t base_phys
;
5849 if (!MBUF_IN_MAP(addr
))
5851 base_phys
= mcl_paddr
[atop_64(addr
- (char *)mbutl
)];
5855 return ((uint64_t)(ptoa_64(base_phys
) | ((uint64_t)addr
& PAGE_MASK
)));
5859 * Dup the mbuf chain passed in. The whole thing. No cute additional cruft.
5860 * And really copy the thing. That way, we don't "precompute" checksums
5861 * for unsuspecting consumers. Assumption: m->m_nextpkt == 0. Trick: for
5862 * small packets, don't dup into a cluster. That way received packets
5863 * don't take up too much room in the sockbuf (cf. sbspace()).
5868 m_dup(struct mbuf
*m
, int how
)
5870 struct mbuf
*n
, **np
;
5876 if (m
->m_flags
& M_PKTHDR
)
5880 * Quick check: if we have one mbuf and its data fits in an
5881 * mbuf with packet header, just copy and go.
5883 if (m
->m_next
== NULL
) {
5884 /* Then just move the data into an mbuf and be done... */
5886 if (m
->m_pkthdr
.len
<= MHLEN
&& m
->m_len
<= MHLEN
) {
5887 if ((n
= _M_GETHDR(how
, m
->m_type
)) == NULL
)
5889 n
->m_len
= m
->m_len
;
5890 m_dup_pkthdr(n
, m
, how
);
5891 bcopy(m
->m_data
, n
->m_data
, m
->m_len
);
5894 } else if (m
->m_len
<= MLEN
) {
5895 if ((n
= _M_GET(how
, m
->m_type
)) == NULL
)
5897 bcopy(m
->m_data
, n
->m_data
, m
->m_len
);
5898 n
->m_len
= m
->m_len
;
5904 printf("<%x: %x, %x, %x\n", m
, m
->m_flags
, m
->m_len
,
5908 n
= _M_GETHDR(how
, m
->m_type
);
5910 n
= _M_GET(how
, m
->m_type
);
5913 if (m
->m_flags
& M_EXT
) {
5914 if (m
->m_len
<= m_maxsize(MC_CL
))
5916 else if (m
->m_len
<= m_maxsize(MC_BIGCL
))
5917 n
= m_mbigget(n
, how
);
5918 else if (m
->m_len
<= m_maxsize(MC_16KCL
) && njcl
> 0)
5919 n
= m_m16kget(n
, how
);
5920 if (!(n
->m_flags
& M_EXT
)) {
5927 /* Don't use M_COPY_PKTHDR: preserve m_data */
5928 m_dup_pkthdr(n
, m
, how
);
5930 if (!(n
->m_flags
& M_EXT
))
5931 n
->m_data
= n
->m_pktdat
;
5933 n
->m_len
= m
->m_len
;
5935 * Get the dup on the same bdry as the original
5936 * Assume that the two mbufs have the same offset to data area
5937 * (up to word boundaries)
5939 bcopy(MTOD(m
, caddr_t
), MTOD(n
, caddr_t
), (unsigned)n
->m_len
);
5943 printf(">%x: %x, %x, %x\n", n
, n
->m_flags
, n
->m_len
,
5958 #define MBUF_MULTIPAGES(m) \
5959 (((m)->m_flags & M_EXT) && \
5960 ((IS_P2ALIGNED((m)->m_data, PAGE_SIZE) \
5961 && (m)->m_len > PAGE_SIZE) || \
5962 (!IS_P2ALIGNED((m)->m_data, PAGE_SIZE) && \
5963 P2ROUNDUP((m)->m_data, PAGE_SIZE) < ((uintptr_t)(m)->m_data + (m)->m_len))))
5965 static struct mbuf
*
5966 m_expand(struct mbuf
*m
, struct mbuf
**last
)
5968 struct mbuf
*top
= NULL
;
5969 struct mbuf
**nm
= &top
;
5970 uintptr_t data0
, data
;
5971 unsigned int len0
, len
;
5973 VERIFY(MBUF_MULTIPAGES(m
));
5974 VERIFY(m
->m_next
== NULL
);
5975 data0
= (uintptr_t)m
->m_data
;
5983 if (IS_P2ALIGNED(data
, PAGE_SIZE
) && len0
> PAGE_SIZE
)
5985 else if (!IS_P2ALIGNED(data
, PAGE_SIZE
) &&
5986 P2ROUNDUP(data
, PAGE_SIZE
) < (data
+ len0
))
5987 len
= P2ROUNDUP(data
, PAGE_SIZE
) - data
;
5992 VERIFY(m
->m_flags
& M_EXT
);
5993 m
->m_data
= (void *)data
;
6005 n
= _M_RETRY(M_DONTWAIT
, MT_DATA
);
6012 n
->m_ext
= m
->m_ext
;
6014 n
->m_flags
|= M_EXT
;
6021 m_normalize(struct mbuf
*m
)
6023 struct mbuf
*top
= NULL
;
6024 struct mbuf
**nm
= &top
;
6025 boolean_t expanded
= FALSE
;
6033 /* Does the data cross one or more page boundaries? */
6034 if (MBUF_MULTIPAGES(m
)) {
6036 if ((m
= m_expand(m
, &last
)) == NULL
) {
6052 atomic_add_32(&mb_normalized
, 1);
 * Append the specified data to the indicated mbuf chain,
 * extending the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
6064 m_append(struct mbuf
*m0
, int len
, caddr_t cp
)
6067 int remainder
, space
;
6069 for (m
= m0
; m
->m_next
!= NULL
; m
= m
->m_next
)
6072 space
= M_TRAILINGSPACE(m
);
6075 * Copy into available space.
6077 if (space
> remainder
)
6079 bcopy(cp
, mtod(m
, caddr_t
) + m
->m_len
, space
);
6084 while (remainder
> 0) {
6086 * Allocate a new mbuf; could check space
6087 * and allocate a cluster instead.
6089 n
= m_get(M_WAITOK
, m
->m_type
);
6092 n
->m_len
= min(MLEN
, remainder
);
6093 bcopy(cp
, mtod(n
, caddr_t
), n
->m_len
);
6095 remainder
-= n
->m_len
;
6099 if (m0
->m_flags
& M_PKTHDR
)
6100 m0
->m_pkthdr
.len
+= len
- remainder
;
6101 return (remainder
== 0);
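/*
 * Example (sketch): appending a small trailer to a packet with m_append();
 * on failure the chain is released.  The trailer buffer and length names
 * below are illustrative.
 *
 *	if (m_append(m, trailer_len, (caddr_t)trailer_buf) == 0) {
 *		m_freem(m);		(could not extend the chain)
 *		m = NULL;
 *	}
 */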
struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next != NULL)
		m = m->m_next;
	return (m);
}
6113 m_fixhdr(struct mbuf
*m0
)
6117 VERIFY(m0
->m_flags
& M_PKTHDR
);
6119 len
= m_length2(m0
, NULL
);
6120 m0
->m_pkthdr
.len
= len
;
6125 m_length2(struct mbuf
*m0
, struct mbuf
**last
)
6131 for (m
= m0
; m
!= NULL
; m
= m
->m_next
) {
6133 if (m
->m_next
== NULL
)
6142 * Defragment a mbuf chain, returning the shortest possible chain of mbufs
6143 * and clusters. If allocation fails and this cannot be completed, NULL will
6144 * be returned, but the passed in chain will be unchanged. Upon success,
6145 * the original chain will be freed, and the new chain will be returned.
6147 * If a non-packet header is passed in, the original mbuf (chain?) will
6148 * be returned unharmed.
 * If offset is specified, the first mbuf in the chain will have a leading
6151 * space of the amount stated by the "off" parameter.
6153 * This routine requires that the m_pkthdr.header field of the original
6154 * mbuf chain is cleared by the caller.
6157 m_defrag_offset(struct mbuf
*m0
, u_int32_t off
, int how
)
6159 struct mbuf
*m_new
= NULL
, *m_final
= NULL
;
6160 int progress
= 0, length
, pktlen
;
6162 if (!(m0
->m_flags
& M_PKTHDR
))
6165 VERIFY(off
< MHLEN
);
6166 m_fixhdr(m0
); /* Needed sanity check */
6168 pktlen
= m0
->m_pkthdr
.len
+ off
;
6170 m_final
= m_getcl(how
, MT_DATA
, M_PKTHDR
);
6172 m_final
= m_gethdr(how
, MT_DATA
);
6174 if (m_final
== NULL
)
6179 m_final
->m_data
+= off
;
6183 * Caller must have handled the contents pointed to by this
6184 * pointer before coming here, as otherwise it will point to
6185 * the original mbuf which will get freed upon success.
6187 VERIFY(m0
->m_pkthdr
.pkt_hdr
== NULL
);
6189 if (m_dup_pkthdr(m_final
, m0
, how
) == 0)
6194 while (progress
< pktlen
) {
6195 length
= pktlen
- progress
;
6196 if (length
> MCLBYTES
)
6198 length
-= ((m_new
== m_final
) ? off
: 0);
6200 if (m_new
== NULL
) {
6202 m_new
= m_getcl(how
, MT_DATA
, 0);
6204 m_new
= m_get(how
, MT_DATA
);
6209 m_copydata(m0
, progress
, length
, mtod(m_new
, caddr_t
));
6211 m_new
->m_len
= length
;
6212 if (m_new
!= m_final
)
6213 m_cat(m_final
, m_new
);
6226 m_defrag(struct mbuf
*m0
, int how
)
6228 return (m_defrag_offset(m0
, 0, how
));
6232 m_mchtype(struct mbuf
*m
, int t
)
6235 mtype_stat_dec(m
->m_type
);
6240 m_mtod(struct mbuf
*m
)
6242 return (MTOD(m
, void *));
6248 return ((struct mbuf
*)((uintptr_t)(x
) & ~(MSIZE
-1)));
6252 m_mcheck(struct mbuf
*m
)
6258 * Return a pointer to mbuf/offset of location in mbuf chain.
6261 m_getptr(struct mbuf
*m
, int loc
, int *off
)
6265 /* Normal end of search. */
6266 if (m
->m_len
> loc
) {
6271 if (m
->m_next
== NULL
) {
6273 /* Point at the end of valid data. */
6286 * Inform the corresponding mcache(s) that there's a waiter below.
6289 mbuf_waiter_inc(mbuf_class_t
class, boolean_t comp
)
6291 mcache_waiter_inc(m_cache(class));
6293 if (class == MC_CL
) {
6294 mcache_waiter_inc(m_cache(MC_MBUF_CL
));
6295 } else if (class == MC_BIGCL
) {
6296 mcache_waiter_inc(m_cache(MC_MBUF_BIGCL
));
6297 } else if (class == MC_16KCL
) {
6298 mcache_waiter_inc(m_cache(MC_MBUF_16KCL
));
6300 mcache_waiter_inc(m_cache(MC_MBUF_CL
));
6301 mcache_waiter_inc(m_cache(MC_MBUF_BIGCL
));
6307 * Inform the corresponding mcache(s) that there's no more waiter below.
6310 mbuf_waiter_dec(mbuf_class_t
class, boolean_t comp
)
6312 mcache_waiter_dec(m_cache(class));
6314 if (class == MC_CL
) {
6315 mcache_waiter_dec(m_cache(MC_MBUF_CL
));
6316 } else if (class == MC_BIGCL
) {
6317 mcache_waiter_dec(m_cache(MC_MBUF_BIGCL
));
6318 } else if (class == MC_16KCL
) {
6319 mcache_waiter_dec(m_cache(MC_MBUF_16KCL
));
6321 mcache_waiter_dec(m_cache(MC_MBUF_CL
));
6322 mcache_waiter_dec(m_cache(MC_MBUF_BIGCL
));
6328 * Called during slab (blocking and non-blocking) allocation. If there
6329 * is at least one waiter, and the time since the first waiter is blocked
6330 * is greater than the watchdog timeout, panic the system.
6338 if (mb_waiters
== 0 || !mb_watchdog
)
6342 since
= now
.tv_sec
- mb_wdtstart
.tv_sec
;
6343 if (since
>= MB_WDT_MAXTIME
) {
6344 panic_plain("%s: %d waiters stuck for %u secs\n%s", __func__
,
6345 mb_waiters
, since
, mbuf_dump());
6351 * Called during blocking allocation. Returns TRUE if one or more objects
6352 * are available at the per-CPU caches layer and that allocation should be
6353 * retried at that level.
6356 mbuf_sleep(mbuf_class_t
class, unsigned int num
, int wait
)
6358 boolean_t mcache_retry
= FALSE
;
6360 lck_mtx_assert(mbuf_mlock
, LCK_MTX_ASSERT_OWNED
);
6362 /* Check if there's anything at the cache layer */
6363 if (mbuf_cached_above(class, wait
)) {
6364 mcache_retry
= TRUE
;
6368 /* Nothing? Then try hard to get it from somewhere */
6369 m_reclaim(class, num
, (wait
& MCR_COMP
));
6371 /* We tried hard and got something? */
6372 if (m_infree(class) > 0) {
6375 } else if (mbuf_cached_above(class, wait
)) {
6377 mcache_retry
= TRUE
;
6379 } else if (wait
& MCR_TRYHARD
) {
6380 mcache_retry
= TRUE
;
6385 * There's really nothing for us right now; inform the
6386 * cache(s) that there is a waiter below and go to sleep.
6388 mbuf_waiter_inc(class, (wait
& MCR_COMP
));
6390 VERIFY(!(wait
& MCR_NOSLEEP
));
6393 * If this is the first waiter, arm the watchdog timer. Otherwise
6394 * check if we need to panic the system due to watchdog timeout.
6396 if (mb_waiters
== 0)
6397 microuptime(&mb_wdtstart
);
6402 (void) msleep(mb_waitchan
, mbuf_mlock
, (PZERO
-1), m_cname(class), NULL
);
6404 /* We are now up; stop getting notified until next round */
6405 mbuf_waiter_dec(class, (wait
& MCR_COMP
));
6407 /* We waited and got something */
6408 if (m_infree(class) > 0) {
6411 } else if (mbuf_cached_above(class, wait
)) {
6413 mcache_retry
= TRUE
;
6416 return (mcache_retry
);
6419 __attribute__((noreturn
))
6421 mbuf_worker_thread(void)
6426 lck_mtx_lock(mbuf_mlock
);
6428 if (mbuf_expand_mcl
) {
6431 /* Adjust to current number of cluster in use */
6432 n
= mbuf_expand_mcl
-
6433 (m_total(MC_CL
) - m_infree(MC_CL
));
6434 if ((n
+ m_total(MC_CL
)) > m_maxlimit(MC_CL
))
6435 n
= m_maxlimit(MC_CL
) - m_total(MC_CL
);
6436 mbuf_expand_mcl
= 0;
6438 if (n
> 0 && freelist_populate(MC_CL
, n
, M_WAIT
) > 0)
6441 if (mbuf_expand_big
) {
6444 /* Adjust to current number of 4 KB cluster in use */
6445 n
= mbuf_expand_big
-
6446 (m_total(MC_BIGCL
) - m_infree(MC_BIGCL
));
6447 if ((n
+ m_total(MC_BIGCL
)) > m_maxlimit(MC_BIGCL
))
6448 n
= m_maxlimit(MC_BIGCL
) - m_total(MC_BIGCL
);
6449 mbuf_expand_big
= 0;
6451 if (n
> 0 && freelist_populate(MC_BIGCL
, n
, M_WAIT
) > 0)
6454 if (mbuf_expand_16k
) {
6457 /* Adjust to current number of 16 KB cluster in use */
6458 n
= mbuf_expand_16k
-
6459 (m_total(MC_16KCL
) - m_infree(MC_16KCL
));
6460 if ((n
+ m_total(MC_16KCL
)) > m_maxlimit(MC_16KCL
))
6461 n
= m_maxlimit(MC_16KCL
) - m_total(MC_16KCL
);
6462 mbuf_expand_16k
= 0;
6465 (void) freelist_populate(MC_16KCL
, n
, M_WAIT
);
6469 * Because we can run out of memory before filling the mbuf
 * map, we should not allocate more clusters than there are
6471 * mbufs -- otherwise we could have a large number of useless
6472 * clusters allocated.
6475 while (m_total(MC_MBUF
) <
6476 (m_total(MC_BIGCL
) + m_total(MC_CL
))) {
6477 if (freelist_populate(MC_MBUF
, 1, M_WAIT
) == 0)
6482 mbuf_worker_needs_wakeup
= TRUE
;
6483 assert_wait((caddr_t
)&mbuf_worker_needs_wakeup
,
6485 lck_mtx_unlock(mbuf_mlock
);
6486 (void) thread_block((thread_continue_t
)mbuf_worker_thread
);
6490 __attribute__((noreturn
))
6492 mbuf_worker_thread_init(void)
6494 mbuf_worker_ready
++;
6495 mbuf_worker_thread();
6504 lck_mtx_assert(mbuf_mlock
, LCK_MTX_ASSERT_OWNED
);
6506 VERIFY(MBUF_IN_MAP(buf
));
6507 ix
= ((unsigned char *)buf
- mbutl
) >> MBSHIFT
;
6508 VERIFY(ix
< maxslabgrp
);
6510 if ((slg
= slabstbl
[ix
]) == NULL
) {
6512 * In the current implementation, we never shrink the slabs
6513 * table; if we attempt to reallocate a cluster group when
6514 * it's already allocated, panic since this is a sign of a
6515 * memory corruption (slabstbl[ix] got nullified).
6518 VERIFY(ix
< slabgrp
);
6520 * Slabs expansion can only be done single threaded; when
6521 * we get here, it must be as a result of m_clalloc() which
6522 * is serialized and therefore mb_clalloc_busy must be set.
6524 VERIFY(mb_clalloc_busy
);
6525 lck_mtx_unlock(mbuf_mlock
);
6527 /* This is a new buffer; create the slabs group for it */
6528 MALLOC(slg
, mcl_slabg_t
*, sizeof (*slg
), M_TEMP
,
6530 MALLOC(slg
->slg_slab
, mcl_slab_t
*, sizeof(mcl_slab_t
) * NSLABSPMB
,
6531 M_TEMP
, M_WAITOK
| M_ZERO
);
6532 VERIFY(slg
!= NULL
&& slg
->slg_slab
!= NULL
);
6534 lck_mtx_lock(mbuf_mlock
);
6536 * No other thread could have gone into m_clalloc() after
6537 * we dropped the lock above, so verify that it's true.
6539 VERIFY(mb_clalloc_busy
);
6543 /* Chain each slab in the group to its forward neighbor */
6544 for (k
= 1; k
< NSLABSPMB
; k
++)
6545 slg
->slg_slab
[k
- 1].sl_next
= &slg
->slg_slab
[k
];
6546 VERIFY(slg
->slg_slab
[NSLABSPMB
- 1].sl_next
== NULL
);
6548 /* And chain the last slab in the previous group to this */
6550 VERIFY(slabstbl
[ix
- 1]->
6551 slg_slab
[NSLABSPMB
- 1].sl_next
== NULL
);
6552 slabstbl
[ix
- 1]->slg_slab
[NSLABSPMB
- 1].sl_next
=
6557 ix
= MTOPG(buf
) % NSLABSPMB
;
6558 VERIFY(ix
< NSLABSPMB
);
6560 return (&slg
->slg_slab
[ix
]);
6564 slab_init(mcl_slab_t
*sp
, mbuf_class_t
class, u_int32_t flags
,
6565 void *base
, void *head
, unsigned int len
, int refcnt
, int chunks
)
6567 sp
->sl_class
= class;
6568 sp
->sl_flags
= flags
;
6572 sp
->sl_refcnt
= refcnt
;
6573 sp
->sl_chunks
= chunks
;
6578 slab_insert(mcl_slab_t
*sp
, mbuf_class_t
class)
6580 VERIFY(slab_is_detached(sp
));
6581 m_slab_cnt(class)++;
6582 TAILQ_INSERT_TAIL(&m_slablist(class), sp
, sl_link
);
6583 sp
->sl_flags
&= ~SLF_DETACHED
;
6586 * If a buffer spans multiple contiguous pages then mark them as
6589 if (class == MC_16KCL
) {
6591 for (k
= 1; k
< NSLABSP16KB
; k
++) {
6593 /* Next slab must already be present */
6594 VERIFY(sp
!= NULL
&& slab_is_detached(sp
));
6595 sp
->sl_flags
&= ~SLF_DETACHED
;
6601 slab_remove(mcl_slab_t
*sp
, mbuf_class_t
class)
6604 VERIFY(!slab_is_detached(sp
));
6605 VERIFY(m_slab_cnt(class) > 0);
6606 m_slab_cnt(class)--;
6607 TAILQ_REMOVE(&m_slablist(class), sp
, sl_link
);
6609 if (class == MC_16KCL
) {
6610 for (k
= 1; k
< NSLABSP16KB
; k
++) {
6612 /* Next slab must already be present */
6614 VERIFY(!slab_is_detached(sp
));
6621 slab_inrange(mcl_slab_t
*sp
, void *buf
)
6623 return ((uintptr_t)buf
>= (uintptr_t)sp
->sl_base
&&
6624 (uintptr_t)buf
< ((uintptr_t)sp
->sl_base
+ sp
->sl_len
));
6630 slab_nextptr_panic(mcl_slab_t
*sp
, void *addr
)
6633 unsigned int chunk_len
= sp
->sl_len
/ sp
->sl_chunks
;
6634 uintptr_t buf
= (uintptr_t)sp
->sl_base
;
6636 for (i
= 0; i
< sp
->sl_chunks
; i
++, buf
+= chunk_len
) {
6637 void *next
= ((mcache_obj_t
*)buf
)->obj_next
;
6641 if (next
!= NULL
&& !MBUF_IN_MAP(next
)) {
6642 mcache_t
*cp
= m_cache(sp
->sl_class
);
6643 panic("%s: %s buffer %p in slab %p modified "
6644 "after free at offset 0: %p out of range "
6645 "[%p-%p)\n", __func__
, cp
->mc_name
,
6646 (void *)buf
, sp
, next
, mbutl
, embutl
);
6650 mcache_audit_t
*mca
= mcl_audit_buf2mca(sp
->sl_class
,
6651 (mcache_obj_t
*)buf
);
6652 mcl_audit_verify_nextptr(next
, mca
);
6658 slab_detach(mcl_slab_t
*sp
)
6660 sp
->sl_link
.tqe_next
= (mcl_slab_t
*)-1;
6661 sp
->sl_link
.tqe_prev
= (mcl_slab_t
**)-1;
6662 sp
->sl_flags
|= SLF_DETACHED
;
6666 slab_is_detached(mcl_slab_t
*sp
)
6668 return ((intptr_t)sp
->sl_link
.tqe_next
== -1 &&
6669 (intptr_t)sp
->sl_link
.tqe_prev
== -1 &&
6670 (sp
->sl_flags
& SLF_DETACHED
));
6674 mcl_audit_init(void *buf
, mcache_audit_t
**mca_list
,
6675 mcache_obj_t
**con_list
, size_t con_size
, unsigned int num
)
6677 mcache_audit_t
*mca
, *mca_tail
;
6678 mcache_obj_t
*con
= NULL
;
6679 boolean_t save_contents
= (con_list
!= NULL
);
6682 ASSERT(num
<= NMBPG
);
6683 ASSERT(con_list
== NULL
|| con_size
!= 0);
6686 VERIFY(ix
< maxclaudit
);
6688 /* Make sure we haven't been here before */
6689 for (i
= 0; i
< NMBPG
; i
++)
6690 VERIFY(mclaudit
[ix
].cl_audit
[i
] == NULL
);
6692 mca
= mca_tail
= *mca_list
;
6696 for (i
= 0; i
< num
; i
++) {
6697 mcache_audit_t
*next
;
6699 next
= mca
->mca_next
;
6700 bzero(mca
, sizeof (*mca
));
6701 mca
->mca_next
= next
;
6702 mclaudit
[ix
].cl_audit
[i
] = mca
;
6704 /* Attach the contents buffer if requested */
6705 if (save_contents
) {
6706 mcl_saved_contents_t
*msc
=
6707 (mcl_saved_contents_t
*)(void *)con
;
6709 VERIFY(msc
!= NULL
);
6710 VERIFY(IS_P2ALIGNED(msc
, sizeof (u_int64_t
)));
6711 VERIFY(con_size
== sizeof (*msc
));
6712 mca
->mca_contents_size
= con_size
;
6713 mca
->mca_contents
= msc
;
6714 con
= con
->obj_next
;
6715 bzero(mca
->mca_contents
, mca
->mca_contents_size
);
6719 mca
= mca
->mca_next
;
6725 *mca_list
= mca_tail
->mca_next
;
6726 mca_tail
->mca_next
= NULL
;
6730 mcl_audit_free(void *buf
, unsigned int num
)
6733 mcache_audit_t
*mca
, *mca_list
;
6736 VERIFY(ix
< maxclaudit
);
6738 if (mclaudit
[ix
].cl_audit
[0] != NULL
) {
6739 mca_list
= mclaudit
[ix
].cl_audit
[0];
6740 for (i
= 0; i
< num
; i
++) {
6741 mca
= mclaudit
[ix
].cl_audit
[i
];
6742 mclaudit
[ix
].cl_audit
[i
] = NULL
;
6743 if (mca
->mca_contents
)
6744 mcache_free(mcl_audit_con_cache
,
6747 mcache_free_ext(mcache_audit_cache
,
6748 (mcache_obj_t
*)mca_list
);
/*
 * Given an address of a buffer (mbuf/2KB/4KB/16KB), return
 * the corresponding audit structure for that buffer.
 */
static mcache_audit_t *
mcl_audit_buf2mca(mbuf_class_t class, mcache_obj_t *mobj)
{
	mcache_audit_t *mca = NULL;
	int ix = MTOPG(mobj), m_idx = 0;
	unsigned char *page_addr;

	VERIFY(ix < maxclaudit);
	VERIFY(IS_P2ALIGNED(mobj, MIN(m_maxsize(class), PAGE_SIZE)));

	page_addr = PGTOM(ix);

	switch (class) {
	case MC_MBUF:
		/*
		 * For the mbuf case, find the index of the page
		 * used by the mbuf and use that index to locate the
		 * base address of the page.  Then find out the
		 * mbuf index relative to the page base and use
		 * it to locate the audit structure.
		 */
		m_idx = MBPAGEIDX(page_addr, mobj);
		VERIFY(m_idx < (int)NMBPG);
		mca = mclaudit[ix].cl_audit[m_idx];
		break;

	case MC_CL:
		/*
		 * Same thing as above, but for 2KB clusters in a page.
		 */
		m_idx = CLPAGEIDX(page_addr, mobj);
		VERIFY(m_idx < (int)NCLPG);
		mca = mclaudit[ix].cl_audit[m_idx];
		break;

	case MC_BIGCL:
		m_idx = BCLPAGEIDX(page_addr, mobj);
		VERIFY(m_idx < (int)NBCLPG);
		mca = mclaudit[ix].cl_audit[m_idx];
		break;

	case MC_16KCL:
		/*
		 * Same as above, but only return the first element.
		 */
		mca = mclaudit[ix].cl_audit[0];
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (mca);
}
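
/*
 * Example (illustrative only): with 4 KB pages and 256-byte mbufs
 * (NMBPG == 16), an mbuf located 0x900 bytes into its page maps to
 * MBPAGEIDX(page_addr, mobj) == 0x900 / 256 == 9, so its audit record
 * is mclaudit[ix].cl_audit[9]; the 2 KB and 4 KB cluster cases work the
 * same way via CLPAGEIDX() and BCLPAGEIDX().
 */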
static void
mcl_audit_mbuf(mcache_audit_t *mca, void *addr, boolean_t composite,
    boolean_t alloc)
{
	struct mbuf *m = addr;
	mcache_obj_t *next = ((mcache_obj_t *)m)->obj_next;

	VERIFY(mca->mca_contents != NULL &&
	    mca->mca_contents_size == AUDIT_CONTENTS_SIZE);

	if (mclverify)
		mcl_audit_verify_nextptr(next, mca);

	if (alloc) {
		if (!composite) {
			/* Save constructed mbuf fields */
			mcl_audit_save_mbuf(m, mca);
			if (mclverify) {
				mcache_set_pattern(MCACHE_FREE_PATTERN, m,
				    m_maxsize(MC_MBUF));
			}
			((mcache_obj_t *)m)->obj_next = next;
		}
		return;
	}

	/* Check if the buffer has been corrupted while in freelist */
	if (mclverify) {
		mcache_audit_free_verify_set(mca, addr, 0, m_maxsize(MC_MBUF));
	}
	/* Restore constructed mbuf fields */
	mcl_audit_restore_mbuf(m, mca, composite);
}
static void
mcl_audit_restore_mbuf(struct mbuf *m, mcache_audit_t *mca, boolean_t composite)
{
	struct mbuf *ms = MCA_SAVED_MBUF_PTR(mca);

	if (composite) {
		struct mbuf *next = m->m_next;
		VERIFY(ms->m_flags == M_EXT && m_get_rfa(ms) != NULL &&
		    MBUF_IS_COMPOSITE(ms));
		VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
		/*
		 * We could have hand-picked the mbuf fields and restore
		 * them individually, but that will be a maintenance
		 * headache.  Instead, restore everything that was saved;
		 * the mbuf layer will recheck and reinitialize anyway.
		 */
		bcopy(ms, m, MCA_SAVED_MBUF_SIZE);
		m->m_next = next;
	} else {
		/*
		 * For a regular mbuf (no cluster attached) there's nothing
		 * to restore other than the type field, which is expected
		 * to be MT_FREE.
		 */
		m->m_type = ms->m_type;
	}
}
static void
mcl_audit_save_mbuf(struct mbuf *m, mcache_audit_t *mca)
{
	VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE);

	bcopy(m, MCA_SAVED_MBUF_PTR(mca), MCA_SAVED_MBUF_SIZE);
}
static void
mcl_audit_cluster(mcache_audit_t *mca, void *addr, size_t size, boolean_t alloc,
    boolean_t save_next)
{
	mcache_obj_t *next = ((mcache_obj_t *)addr)->obj_next;

	if (alloc) {
		if (mclverify) {
			mcache_set_pattern(MCACHE_FREE_PATTERN, addr, size);
		}
		if (save_next) {
			mcl_audit_verify_nextptr(next, mca);
			((mcache_obj_t *)addr)->obj_next = next;
		}
	} else if (mclverify) {
		/* Check if the buffer has been corrupted while in freelist */
		mcl_audit_verify_nextptr(next, mca);
		mcache_audit_free_verify_set(mca, addr, 0, size);
	}
}
static void
mcl_audit_scratch(mcache_audit_t *mca)
{
	void *stack[MCACHE_STACK_DEPTH + 1];
	mcl_scratch_audit_t *msa;
	struct timeval now;

	VERIFY(mca->mca_contents != NULL);
	msa = MCA_SAVED_SCRATCH_PTR(mca);

	msa->msa_pthread = msa->msa_thread;
	msa->msa_thread = current_thread();
	bcopy(msa->msa_stack, msa->msa_pstack, sizeof (msa->msa_pstack));
	msa->msa_pdepth = msa->msa_depth;
	bzero(stack, sizeof (stack));
	msa->msa_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1;
	bcopy(&stack[1], msa->msa_stack, sizeof (msa->msa_stack));

	msa->msa_ptstamp = msa->msa_tstamp;
	microuptime(&now);
	/* tstamp is in ms relative to base_ts */
	msa->msa_tstamp = ((now.tv_usec - mb_start.tv_usec) / 1000);
	if ((now.tv_sec - mb_start.tv_sec) > 0)
		msa->msa_tstamp += ((now.tv_sec - mb_start.tv_sec) * 1000);
}
static void
mcl_audit_mcheck_panic(struct mbuf *m)
{
	mcache_audit_t *mca;

	mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);

	panic("mcl_audit: freed mbuf %p with type 0x%x (instead of 0x%x)\n%s\n",
	    m, (u_int16_t)m->m_type, MT_FREE, mcache_dump_mca(mca));
	/* NOTREACHED */
}
static void
mcl_audit_verify_nextptr(void *next, mcache_audit_t *mca)
{
	if (next != NULL && !MBUF_IN_MAP(next) &&
	    (next != (void *)MCACHE_FREE_PATTERN || !mclverify)) {
		panic("mcl_audit: buffer %p modified after free at offset 0: "
		    "%p out of range [%p-%p)\n%s\n",
		    mca->mca_addr, next, mbutl, embutl, mcache_dump_mca(mca));
		/* NOTREACHED */
	}
}
/* This function turns on mbuf leak detection */
static void
mleak_activate(void)
{
	mleak_table.mleak_sample_factor = MLEAK_SAMPLE_FACTOR;
	PE_parse_boot_argn("mleak_sample_factor",
	    &mleak_table.mleak_sample_factor,
	    sizeof (mleak_table.mleak_sample_factor));

	if (mleak_table.mleak_sample_factor == 0)
		mclfindleak = 0;

	if (mclfindleak == 0)
		return;

	vm_size_t alloc_size =
	    mleak_alloc_buckets * sizeof (struct mallocation);
	vm_size_t trace_size = mleak_trace_buckets * sizeof (struct mtrace);

	MALLOC(mleak_allocations, struct mallocation *, alloc_size,
	    M_TEMP, M_WAITOK | M_ZERO);
	VERIFY(mleak_allocations != NULL);

	MALLOC(mleak_traces, struct mtrace *, trace_size,
	    M_TEMP, M_WAITOK | M_ZERO);
	VERIFY(mleak_traces != NULL);

	MALLOC(mleak_stat, mleak_stat_t *, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES),
	    M_TEMP, M_WAITOK | M_ZERO);
	VERIFY(mleak_stat != NULL);
	mleak_stat->ml_cnt = MLEAK_NUM_TRACES;
#ifdef __LP64__
	mleak_stat->ml_isaddr64 = 1;
#endif /* __LP64__ */
}
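
/*
 * Note: the sampling rate can also be tuned at boot via the
 * "mleak_sample_factor" boot-arg parsed above (for example,
 * boot-args="mleak_sample_factor=1" would sample every allocation);
 * a factor of 0 disables leak detection entirely, as handled above.
 */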
static void
mleak_logger(u_int32_t num, mcache_obj_t *addr, boolean_t alloc)
{
	int temp;

	if (mclfindleak == 0)
		return;

	if (!alloc)
		return (mleak_free(addr));

	temp = atomic_add_32_ov(&mleak_table.mleak_capture, 1);

	if ((temp % mleak_table.mleak_sample_factor) == 0 && addr != NULL) {
		uintptr_t bt[MLEAK_STACK_DEPTH];
		int logged = backtrace(bt, MLEAK_STACK_DEPTH);
		mleak_log(bt, addr, logged, num);
	}
}
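
/*
 * In other words, only about one in every mleak_table.mleak_sample_factor
 * allocations that reaches this point (and carries a non-NULL address) has
 * its backtrace captured and handed to mleak_log(); every other allocation
 * only pays for the atomic increment of mleak_capture.
 */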
/*
 * This function records the allocation in the mleak_allocations table
 * and the backtrace in the mleak_traces table.  If the allocation slot
 * is already occupied by the same trace, we bail out; if the trace slot
 * is occupied by a different trace with the same hash, we also bail out,
 * and if it holds the same trace we simply increment its refcount.
 */
static boolean_t
mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num)
{
	struct mallocation *allocation;
	struct mtrace *trace;
	uint32_t trace_index;

	/* Quit if someone else modifying the tables */
	if (!lck_mtx_try_lock_spin(mleak_lock)) {
		mleak_table.total_conflicts++;
		return (FALSE);
	}

	allocation = &mleak_allocations[hashaddr((uintptr_t)addr,
	    mleak_alloc_buckets)];
	trace_index = hashbacktrace(bt, depth, mleak_trace_buckets);
	trace = &mleak_traces[trace_index];

	VERIFY(allocation <= &mleak_allocations[mleak_alloc_buckets - 1]);
	VERIFY(trace <= &mleak_traces[mleak_trace_buckets - 1]);

	allocation->hitcount++;
	trace->hitcount++;

	/*
	 * If the allocation bucket we want is occupied
	 * and the occupier has the same trace, just bail.
	 */
	if (allocation->element != NULL &&
	    trace_index == allocation->trace_index) {
		mleak_table.alloc_collisions++;
		lck_mtx_unlock(mleak_lock);
		return (TRUE);
	}

	/*
	 * Store the backtrace in the traces array;
	 * Size of zero = trace bucket is free.
	 */
	if (trace->allocs > 0 &&
	    bcmp(trace->addr, bt, (depth * sizeof (uintptr_t))) != 0) {
		/* Different, unique trace, but the same hash! Bail out. */
		trace->collisions++;
		mleak_table.trace_collisions++;
		lck_mtx_unlock(mleak_lock);
		return (TRUE);
	} else if (trace->allocs > 0) {
		/* Same trace, already added, so increment refcount */
		trace->allocs++;
	} else {
		/* Found an unused trace bucket, so record the trace here */
		if (trace->depth != 0) {
			/* this slot previously used but not currently in use */
			mleak_table.trace_overwrites++;
		}
		mleak_table.trace_recorded++;
		trace->allocs = 1;
		memcpy(trace->addr, bt, (depth * sizeof (uintptr_t)));
		trace->depth = depth;
		trace->collisions = 0;
	}

	/* Step 2: Store the allocation record in the allocations array */
	if (allocation->element != NULL) {
		/*
		 * Replace an existing allocation.  No need to preserve
		 * because only a subset of the allocations are being
		 * recorded anyway.
		 */
		mleak_table.alloc_collisions++;
	} else if (allocation->trace_index != 0) {
		mleak_table.alloc_overwrites++;
	}
	allocation->element = addr;
	allocation->trace_index = trace_index;
	allocation->count = num;
	mleak_table.alloc_recorded++;
	mleak_table.outstanding_allocs++;

	lck_mtx_unlock(mleak_lock);
	return (TRUE);
}
static void
mleak_free(mcache_obj_t *addr)
{
	while (addr != NULL) {
		struct mallocation *allocation = &mleak_allocations
		    [hashaddr((uintptr_t)addr, mleak_alloc_buckets)];

		if (allocation->element == addr &&
		    allocation->trace_index < mleak_trace_buckets) {
			lck_mtx_lock_spin(mleak_lock);
			if (allocation->element == addr &&
			    allocation->trace_index < mleak_trace_buckets) {
				struct mtrace *trace;
				trace = &mleak_traces[allocation->trace_index];
				/* allocs = 0 means trace bucket is unused */
				if (trace->allocs > 0)
					trace->allocs--;
				if (trace->allocs == 0)
					trace->depth = 0;
				/* NULL element means alloc bucket is unused */
				allocation->element = NULL;
				mleak_table.outstanding_allocs--;
			}
			lck_mtx_unlock(mleak_lock);
		}
		addr = addr->obj_next;
	}
}
static void
mleak_sort_traces()
{
	int i, j, k;
	struct mtrace *swap;

	for (i = 0; i < MLEAK_NUM_TRACES; i++)
		mleak_top_trace[i] = NULL;

	for (i = 0, j = 0; j < MLEAK_NUM_TRACES && i < mleak_trace_buckets; i++) {
		if (mleak_traces[i].allocs <= 0)
			continue;

		mleak_top_trace[j] = &mleak_traces[i];
		for (k = j; k > 0; k--) {
			if (mleak_top_trace[k]->allocs <=
			    mleak_top_trace[k-1]->allocs)
				break;

			swap = mleak_top_trace[k-1];
			mleak_top_trace[k-1] = mleak_top_trace[k];
			mleak_top_trace[k] = swap;
		}
		j++;
	}

	j--;
	for (; i < mleak_trace_buckets; i++) {
		if (mleak_traces[i].allocs <= mleak_top_trace[j]->allocs)
			continue;

		mleak_top_trace[j] = &mleak_traces[i];

		for (k = j; k > 0; k--) {
			if (mleak_top_trace[k]->allocs <=
			    mleak_top_trace[k-1]->allocs)
				break;

			swap = mleak_top_trace[k-1];
			mleak_top_trace[k-1] = mleak_top_trace[k];
			mleak_top_trace[k] = swap;
		}
	}
}
static void
mleak_update_stats()
{
	mleak_trace_stat_t *mltr;
	int i;

	VERIFY(mleak_stat != NULL);
#ifdef __LP64__
	VERIFY(mleak_stat->ml_isaddr64);
#else
	VERIFY(!mleak_stat->ml_isaddr64);
#endif /* !__LP64__ */
	VERIFY(mleak_stat->ml_cnt == MLEAK_NUM_TRACES);

	mleak_sort_traces();

	mltr = &mleak_stat->ml_trace[0];
	bzero(mltr, sizeof (*mltr) * MLEAK_NUM_TRACES);
	for (i = 0; i < MLEAK_NUM_TRACES; i++) {
		int j;

		if (mleak_top_trace[i] == NULL ||
		    mleak_top_trace[i]->allocs == 0)
			continue;

		mltr->mltr_collisions = mleak_top_trace[i]->collisions;
		mltr->mltr_hitcount = mleak_top_trace[i]->hitcount;
		mltr->mltr_allocs = mleak_top_trace[i]->allocs;
		mltr->mltr_depth = mleak_top_trace[i]->depth;

		VERIFY(mltr->mltr_depth <= MLEAK_STACK_DEPTH);
		for (j = 0; j < mltr->mltr_depth; j++)
			mltr->mltr_addr[j] = mleak_top_trace[i]->addr[j];

		mltr++;
	}
}
static struct mbtypes {
	int		mt_type;
	const char	*mt_name;
} mbtypes[] = {
	{ MT_DATA,	"data" },
	{ MT_OOBDATA,	"oob data" },
	{ MT_CONTROL,	"ancillary data" },
	{ MT_HEADER,	"packet headers" },
	{ MT_SOCKET,	"socket structures" },
	{ MT_PCB,	"protocol control blocks" },
	{ MT_RTABLE,	"routing table entries" },
	{ MT_HTABLE,	"IMP host table entries" },
	{ MT_ATABLE,	"address resolution tables" },
	{ MT_FTABLE,	"fragment reassembly queue headers" },
	{ MT_SONAME,	"socket names and addresses" },
	{ MT_SOOPTS,	"socket options" },
	{ MT_RIGHTS,	"access rights" },
	{ MT_IFADDR,	"interface addresses" },
	{ MT_TAG,	"packet tags" },
	{ 0,		NULL }
};
#define	MBUF_DUMP_BUF_CHK() {	\
	clen -= k;		\
	if (clen < 1)		\
		goto done;	\
	c += k;			\
}

static char *
mbuf_dump(void)
{
	unsigned long totmem = 0, totfree = 0, totmbufs, totused, totpct;
	u_int32_t m_mbufs = 0, m_clfree = 0, m_bigclfree = 0;
	u_int32_t m_mbufclfree = 0, m_mbufbigclfree = 0;
	u_int32_t m_16kclusters = 0, m_16kclfree = 0, m_mbuf16kclfree = 0;
	int nmbtypes = sizeof (mbstat.m_mtypes) / sizeof (short);
	uint8_t seen[256];
	struct mbtypes *mp;
	mb_class_stat_t *sp;
	mleak_trace_stat_t *mltr;
	char *c = mbuf_dump_buf;
	int i, k, clen = MBUF_DUMP_BUF_SIZE;

	mbuf_dump_buf[0] = '\0';

	/* synchronize all statistics in the mbuf table */
	mbuf_stat_sync();
	mbuf_mtypes_sync(TRUE);

	sp = &mb_stat->mbs_class[0];
	for (i = 0; i < mb_stat->mbs_cnt; i++, sp++) {
		u_int32_t mem;

		if (m_class(i) == MC_MBUF) {
			m_mbufs = sp->mbcl_active;
		} else if (m_class(i) == MC_CL) {
			m_clfree = sp->mbcl_total - sp->mbcl_active;
		} else if (m_class(i) == MC_BIGCL) {
			m_bigclfree = sp->mbcl_total - sp->mbcl_active;
		} else if (njcl > 0 && m_class(i) == MC_16KCL) {
			m_16kclfree = sp->mbcl_total - sp->mbcl_active;
			m_16kclusters = sp->mbcl_total;
		} else if (m_class(i) == MC_MBUF_CL) {
			m_mbufclfree = sp->mbcl_total - sp->mbcl_active;
		} else if (m_class(i) == MC_MBUF_BIGCL) {
			m_mbufbigclfree = sp->mbcl_total - sp->mbcl_active;
		} else if (njcl > 0 && m_class(i) == MC_MBUF_16KCL) {
			m_mbuf16kclfree = sp->mbcl_total - sp->mbcl_active;
		}

		mem = sp->mbcl_ctotal * sp->mbcl_size;
		totmem += mem;
		totfree += (sp->mbcl_mc_cached + sp->mbcl_infree) *
		    sp->mbcl_size;
	}

	/* adjust free counts to include composite caches */
	m_clfree += m_mbufclfree;
	m_bigclfree += m_mbufbigclfree;
	m_16kclfree += m_mbuf16kclfree;

	totmbufs = 0;
	for (mp = mbtypes; mp->mt_name != NULL; mp++)
		totmbufs += mbstat.m_mtypes[mp->mt_type];
	if (totmbufs > m_mbufs)
		totmbufs = m_mbufs;
	k = snprintf(c, clen, "%lu/%u mbufs in use:\n", totmbufs, m_mbufs);
	MBUF_DUMP_BUF_CHK();

	bzero(&seen, sizeof (seen));
	for (mp = mbtypes; mp->mt_name != NULL; mp++) {
		if (mbstat.m_mtypes[mp->mt_type] != 0) {
			seen[mp->mt_type] = 1;
			k = snprintf(c, clen, "\t%u mbufs allocated to %s\n",
			    mbstat.m_mtypes[mp->mt_type], mp->mt_name);
			MBUF_DUMP_BUF_CHK();
		}
	}
	seen[MT_FREE] = 1;
	for (i = 0; i < nmbtypes; i++)
		if (!seen[i] && mbstat.m_mtypes[i] != 0) {
			k = snprintf(c, clen, "\t%u mbufs allocated to "
			    "<mbuf type %d>\n", mbstat.m_mtypes[i], i);
			MBUF_DUMP_BUF_CHK();
		}
	if ((m_mbufs - totmbufs) > 0) {
		k = snprintf(c, clen, "\t%lu mbufs allocated to caches\n",
		    m_mbufs - totmbufs);
		MBUF_DUMP_BUF_CHK();
	}
	k = snprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n"
	    "%u/%u mbuf 4KB clusters in use\n",
	    (unsigned int)(mbstat.m_clusters - m_clfree),
	    (unsigned int)mbstat.m_clusters,
	    (unsigned int)(mbstat.m_bigclusters - m_bigclfree),
	    (unsigned int)mbstat.m_bigclusters);
	MBUF_DUMP_BUF_CHK();

	if (njcl > 0) {
		k = snprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n",
		    m_16kclusters - m_16kclfree, m_16kclusters,
		    njclbytes / 1024);
		MBUF_DUMP_BUF_CHK();
	}
	totused = totmem - totfree;
	if (totmem == 0) {
		totpct = 0;
	} else if (totused < (ULONG_MAX / 100)) {
		totpct = (totused * 100) / totmem;
	} else {
		u_long totmem1 = totmem / 100;
		u_long totused1 = totused / 100;
		totpct = (totused1 * 100) / totmem1;
	}
	k = snprintf(c, clen, "%lu KB allocated to network (approx. %lu%% "
	    "in use)\n", totmem / 1024, totpct);
	MBUF_DUMP_BUF_CHK();

	/* mbuf leak detection statistics */
	mleak_update_stats();

	k = snprintf(c, clen, "\nmbuf leak detection table:\n");
	MBUF_DUMP_BUF_CHK();
	k = snprintf(c, clen, "\ttotal captured: %u (one per %u)\n",
	    mleak_table.mleak_capture / mleak_table.mleak_sample_factor,
	    mleak_table.mleak_sample_factor);
	MBUF_DUMP_BUF_CHK();
	k = snprintf(c, clen, "\ttotal allocs outstanding: %llu\n",
	    mleak_table.outstanding_allocs);
	MBUF_DUMP_BUF_CHK();
	k = snprintf(c, clen, "\tnew hash recorded: %llu allocs, %llu traces\n",
	    mleak_table.alloc_recorded, mleak_table.trace_recorded);
	MBUF_DUMP_BUF_CHK();
	k = snprintf(c, clen, "\thash collisions: %llu allocs, %llu traces\n",
	    mleak_table.alloc_collisions, mleak_table.trace_collisions);
	MBUF_DUMP_BUF_CHK();
	k = snprintf(c, clen, "\toverwrites: %llu allocs, %llu traces\n",
	    mleak_table.alloc_overwrites, mleak_table.trace_overwrites);
	MBUF_DUMP_BUF_CHK();
	k = snprintf(c, clen, "\tlock conflicts: %llu\n\n",
	    mleak_table.total_conflicts);
	MBUF_DUMP_BUF_CHK();

	k = snprintf(c, clen, "top %d outstanding traces:\n",
	    mleak_stat->ml_cnt);
	MBUF_DUMP_BUF_CHK();
	for (i = 0; i < mleak_stat->ml_cnt; i++) {
		mltr = &mleak_stat->ml_trace[i];
		k = snprintf(c, clen, "[%d] %llu outstanding alloc(s), "
		    "%llu hit(s), %llu collision(s)\n", (i + 1),
		    mltr->mltr_allocs, mltr->mltr_hitcount,
		    mltr->mltr_collisions);
		MBUF_DUMP_BUF_CHK();
	}

	if (mleak_stat->ml_isaddr64)
		k = snprintf(c, clen, MB_LEAK_HDR_64);
	else
		k = snprintf(c, clen, MB_LEAK_HDR_32);
	MBUF_DUMP_BUF_CHK();

	for (i = 0; i < MLEAK_STACK_DEPTH; i++) {
		int j;
		k = snprintf(c, clen, "%2d: ", (i + 1));
		MBUF_DUMP_BUF_CHK();
		for (j = 0; j < mleak_stat->ml_cnt; j++) {
			mltr = &mleak_stat->ml_trace[j];
			if (i < mltr->mltr_depth) {
				if (mleak_stat->ml_isaddr64) {
					k = snprintf(c, clen, "0x%0llx ",
					    (uint64_t)VM_KERNEL_UNSLIDE(
					    mltr->mltr_addr[i]));
				} else {
					k = snprintf(c, clen,
					    "0x%08x ",
					    (uint32_t)VM_KERNEL_UNSLIDE(
					    mltr->mltr_addr[i]));
				}
			} else {
				if (mleak_stat->ml_isaddr64)
					k = snprintf(c, clen,
					    MB_LEAK_SPACING_64);
				else
					k = snprintf(c, clen,
					    MB_LEAK_SPACING_32);
			}
			MBUF_DUMP_BUF_CHK();
		}
		k = snprintf(c, clen, "\n");
		MBUF_DUMP_BUF_CHK();
	}
done:
	return (mbuf_dump_buf);
}

#undef MBUF_DUMP_BUF_CHK
/*
 * Convert between a regular and a packet header mbuf.  Caller is responsible
 * for setting or clearing M_PKTHDR; this routine does the rest of the work.
 */
int
m_reinit(struct mbuf *m, int hdr)
{
	int ret = 0;

	if (hdr) {
		VERIFY(!(m->m_flags & M_PKTHDR));
		if (!(m->m_flags & M_EXT) &&
		    (m->m_data != m->m_dat || m->m_len > 0)) {
			/*
			 * If there's no external cluster attached and the
			 * mbuf appears to contain user data, we cannot
			 * safely convert this to a packet header mbuf,
			 * as the packet header structure might overlap
			 * with the data.
			 */
			printf("%s: cannot set M_PKTHDR on altered mbuf %llx, "
			    "m_data %llx (expected %llx), "
			    "m_len %d (expected 0)\n",
			    __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m),
			    (uint64_t)VM_KERNEL_ADDRPERM(m->m_data),
			    (uint64_t)VM_KERNEL_ADDRPERM(m->m_dat), m->m_len);
			ret = EBUSY;
		} else {
			VERIFY((m->m_flags & M_EXT) || m->m_data == m->m_dat);
			m->m_flags |= M_PKTHDR;
			MBUF_INIT_PKTHDR(m);
		}
	} else {
		/* Check for scratch area overflow */
		m_redzone_verify(m);
		/* Free the aux data and tags if there is any */
		m_tag_delete_chain(m, NULL);
		m->m_flags &= ~M_PKTHDR;
	}

	return (ret);
}
uint32_t
m_ext_set_prop(struct mbuf *m, uint32_t o, uint32_t n)
{
	ASSERT(m->m_flags & M_EXT);
	return (atomic_test_set_32(&MEXT_PRIV(m), o, n));
}

uint32_t
m_ext_get_prop(struct mbuf *m)
{
	ASSERT(m->m_flags & M_EXT);
	return (MEXT_PRIV(m));
}

int
m_ext_paired_is_active(struct mbuf *m)
{
	return (MBUF_IS_PAIRED(m) ? (MEXT_PREF(m) > MEXT_MINREF(m)) : 1);
}
void
m_ext_paired_activate(struct mbuf *m)
{
	struct ext_ref *rfa;
	int hdr, type;
	caddr_t extbuf;
	m_ext_free_func_t extfree;
	u_int32_t extsize;

	VERIFY(MBUF_IS_PAIRED(m));
	VERIFY(MEXT_REF(m) == MEXT_MINREF(m));
	VERIFY(MEXT_PREF(m) == MEXT_MINREF(m));

	hdr = (m->m_flags & M_PKTHDR);
	type = m->m_type;
	extbuf = m->m_ext.ext_buf;
	extfree = m_get_ext_free(m);
	extsize = m->m_ext.ext_size;
	rfa = m_get_rfa(m);

	VERIFY(extbuf != NULL && rfa != NULL);

	/*
	 * Safe to reinitialize packet header tags, since it's
	 * already taken care of at m_free() time.  Similar to
	 * what's done in m_clattach() for the cluster.  Bump
	 * up MEXT_PREF to indicate activation.
	 */
	MBUF_INIT(m, hdr, type);
	MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
	    1, 1, 2, EXTF_PAIRED, MEXT_PRIV(m), m);
}
void
m_scratch_init(struct mbuf *m)
{
	struct pkthdr *pkt = &m->m_pkthdr;

	VERIFY(m->m_flags & M_PKTHDR);

	/* See comments in <rdar://problem/14040693> */
	if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
		panic_plain("Invalid attempt to modify guarded module-private "
		    "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
		/* NOTREACHED */
	}

	bzero(&pkt->pkt_mpriv, sizeof (pkt->pkt_mpriv));
}
/*
 * This routine is reserved for mbuf_get_driver_scratch(); clients inside
 * xnu that intend on utilizing the module-private area should directly
 * refer to the pkt_mpriv structure in the pkthdr.  They are also expected
 * to set and clear PKTF_PRIV_GUARDED, while owning the packet and prior
 * to handing it off to another module, respectively.
 */
u_int32_t
m_scratch_get(struct mbuf *m, u_int8_t **p)
{
	struct pkthdr *pkt = &m->m_pkthdr;

	VERIFY(m->m_flags & M_PKTHDR);

	/* See comments in <rdar://problem/14040693> */
	if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
		panic_plain("Invalid attempt to access guarded module-private "
		    "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
		/* NOTREACHED */
	}

	if (mcltrace) {
		mcache_audit_t *mca;

		lck_mtx_lock(mbuf_mlock);
		mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
		if (mca->mca_uflags & MB_SCVALID)
			mcl_audit_scratch(mca);
		lck_mtx_unlock(mbuf_mlock);
	}

	*p = (u_int8_t *)&pkt->pkt_mpriv;
	return (sizeof (pkt->pkt_mpriv));
}
void
m_redzone_init(struct mbuf *m)
{
	VERIFY(m->m_flags & M_PKTHDR);
	/*
	 * Each mbuf has a unique red zone pattern, which is an XOR
	 * of the red zone cookie and the address of the mbuf.
	 */
	m->m_pkthdr.redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
}

void
m_redzone_verify(struct mbuf *m)
{
	u_int32_t mb_redzone;

	VERIFY(m->m_flags & M_PKTHDR);

	mb_redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
	if (m->m_pkthdr.redzone != mb_redzone) {
		panic("mbuf %p redzone violation with value 0x%x "
		    "(instead of 0x%x, using cookie 0x%x)\n",
		    m, m->m_pkthdr.redzone, mb_redzone, mb_redzone_cookie);
		/* NOTREACHED */
	}
}
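
/*
 * Worked example (illustrative values): if the low 32 bits of the mbuf
 * address are 0x12345600 and mb_redzone_cookie is 0xa5a5a5a5, the expected
 * redzone value is 0x12345600 ^ 0xa5a5a5a5 == 0xb791f3a5; any other stored
 * value means the module-private scratch area overflowed into the red zone
 * and the panic above fires.
 */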
__private_extern__ inline void
m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free,
    caddr_t ext_arg)
{
	VERIFY(m->m_flags & M_EXT);
	if (rfa != NULL) {
		m->m_ext.ext_refflags =
		    (struct ext_ref *)(((uintptr_t)rfa) ^ mb_obscure_extref);
		if (ext_free != NULL) {
			rfa->ext_token = ((uintptr_t)&rfa->ext_token) ^
			    mb_obscure_extfree;
			m->m_ext.ext_free = (m_ext_free_func_t)
			    (((uintptr_t)ext_free) ^ rfa->ext_token);
			if (ext_arg != NULL) {
				m->m_ext.ext_arg = (((uintptr_t)ext_arg) ^
				    rfa->ext_token);
			} else {
				m->m_ext.ext_arg = NULL;
			}
		} else {
			rfa->ext_token = 0;
			m->m_ext.ext_free = NULL;
			m->m_ext.ext_arg = NULL;
		}
	} else {
		/*
		 * If we are going to lose the cookie in ext_token by
		 * resetting the rfa, we should use the global cookie
		 * to obscure the ext_free and ext_arg pointers.
		 */
		if (ext_free != NULL) {
			m->m_ext.ext_free = ((uintptr_t)ext_free ^
			    mb_obscure_extfree);
			if (ext_arg != NULL) {
				m->m_ext.ext_arg = ((uintptr_t)ext_arg ^
				    mb_obscure_extfree);
			} else {
				m->m_ext.ext_arg = NULL;
			}
		} else {
			m->m_ext.ext_free = NULL;
			m->m_ext.ext_arg = NULL;
		}
		m->m_ext.ext_refflags = NULL;
	}
}
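
/*
 * Because XOR is its own inverse, m_get_ext_free() and m_get_ext_arg()
 * below recover the original pointers by XOR-ing the stored values with
 * the same token used here: rfa->ext_token when an ext_ref is attached,
 * or the global mb_obscure_extfree cookie otherwise.
 */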
__private_extern__ inline struct ext_ref *
m_get_rfa(struct mbuf *m)
{
	if (m->m_ext.ext_refflags == NULL)
		return (NULL);
	else
		return ((struct ext_ref *)(((uintptr_t)m->m_ext.ext_refflags) ^ mb_obscure_extref));
}

__private_extern__ inline m_ext_free_func_t
m_get_ext_free(struct mbuf *m)
{
	struct ext_ref *rfa;
	if (m->m_ext.ext_free == NULL)
		return (NULL);

	rfa = m_get_rfa(m);
	if (rfa == NULL)
		return ((uintptr_t)m->m_ext.ext_free ^ mb_obscure_extfree);
	else
		return ((m_ext_free_func_t)(((uintptr_t)m->m_ext.ext_free)
		    ^ rfa->ext_token));
}

__private_extern__ inline caddr_t
m_get_ext_arg(struct mbuf *m)
{
	struct ext_ref *rfa;
	if (m->m_ext.ext_arg == NULL)
		return (NULL);

	rfa = m_get_rfa(m);
	if (rfa == NULL) {
		return ((uintptr_t)m->m_ext.ext_arg ^ mb_obscure_extfree);
	} else {
		return ((caddr_t)(((uintptr_t)m->m_ext.ext_arg) ^
		    rfa->ext_token));
	}
}
/*
 * Send a report of mbuf usage if the usage is at least 6% of max limit
 * or if there has been at least 3% increase since the last report.
 *
 * The values 6% and 3% are chosen so that we can do simple arithmetic
 * with shift operations.
 */
static boolean_t
mbuf_report_usage(mbuf_class_t cl)
{
	/* if a report is already in progress, nothing to do */
	if (mb_peak_newreport)
		return (TRUE);

	if (m_total(cl) > m_peak(cl) &&
	    m_total(cl) >= (m_maxlimit(cl) >> 4) &&
	    (m_total(cl) - m_peak(cl)) >= (m_peak(cl) >> 5))
		return (TRUE);
	return (FALSE);
}
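
/*
 * The shifts above implement the thresholds described in the comment:
 * a right shift by 4 divides by 16 (about 6.25%, the "at least 6% of max
 * limit" test), and a right shift by 5 divides by 32 (about 3.125%, the
 * "at least 3% increase since the last report" test).
 */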
__private_extern__ void
mbuf_report_peak_usage(void)
{
	int i = 0;
	u_int64_t uptime;
	struct nstat_sysinfo_data ns_data;
	uint32_t memreleased = 0;

	uptime = net_uptime();
	lck_mtx_lock(mbuf_mlock);

	/* Generate an initial report after 1 week of uptime */
	if (!mb_peak_firstreport &&
	    uptime > MBUF_PEAK_FIRST_REPORT_THRESHOLD) {
		mb_peak_newreport = TRUE;
		mb_peak_firstreport = TRUE;
	}

	if (!mb_peak_newreport) {
		lck_mtx_unlock(mbuf_mlock);
		return;
	}

	/*
	 * Since a report is being generated before 1 week,
	 * we do not need to force another one later
	 */
	if (uptime < MBUF_PEAK_FIRST_REPORT_THRESHOLD)
		mb_peak_firstreport = TRUE;

	for (i = 0; i < NELEM(mbuf_table); i++) {
		m_peak(m_class(i)) = m_total(m_class(i));
		memreleased += m_release_cnt(i);
		m_release_cnt(i) = 0;
	}
	mb_peak_newreport = FALSE;
	lck_mtx_unlock(mbuf_mlock);

	bzero(&ns_data, sizeof(ns_data));
	ns_data.flags = NSTAT_SYSINFO_MBUF_STATS;
	ns_data.u.mb_stats.total_256b = m_peak(MC_MBUF);
	ns_data.u.mb_stats.total_2kb = m_peak(MC_CL);
	ns_data.u.mb_stats.total_4kb = m_peak(MC_BIGCL);
	ns_data.u.mb_stats.total_16kb = m_peak(MC_16KCL);
	ns_data.u.mb_stats.sbmb_total = total_sbmb_cnt_peak;
	ns_data.u.mb_stats.sb_atmbuflimit = sbmb_limreached;
	ns_data.u.mb_stats.draincnt = mbstat.m_drain;
	ns_data.u.mb_stats.memreleased = memreleased;
	ns_data.u.mb_stats.sbmb_floor = total_sbmb_cnt_floor;

	nstat_sysinfo_send_data(&ns_data);

	/*
	 * Reset the floor whenever we report a new
	 * peak to track the trend (an increase in peak usage
	 * is not a leak if mbufs get released
	 * between reports and the floor stays low)
	 */
	total_sbmb_cnt_floor = total_sbmb_cnt_peak;
}
/*
 * Called by the VM when there's memory pressure.
 */
__private_extern__ void
m_drain(void)
{
	mbuf_class_t mc;
	mcl_slab_t *sp, *sp_tmp, *nsp;
	unsigned int num, k, interval, released = 0;
	unsigned long total_mem = 0, use_mem = 0;
	boolean_t ret, purge_caches = FALSE;
	ppnum_t offset;
	mcache_obj_t *obj;
	unsigned long per;
	static uint64_t last_drain = 0;
	static unsigned char scratch[32];
	static ppnum_t scratch_pa = 0;

	if (mb_drain_maxint == 0 || mb_waiters)
		return;
	if (scratch_pa == 0) {
		bzero(scratch, sizeof(scratch));
		scratch_pa = pmap_find_phys(kernel_pmap, (addr64_t)scratch);
		VERIFY(scratch_pa);
	} else if (mclverify) {
		/*
		 * Panic if a driver wrote to our scratch memory.
		 */
		for (k = 0; k < sizeof(scratch); k++)
			if (scratch[k])
				panic("suspect DMA to freed address");
	}
	/*
	 * Don't free memory too often as that could cause excessive
	 * waiting times for mbufs.  Purge caches if we were asked to drain
	 * in the last 5 minutes.
	 */
	lck_mtx_lock(mbuf_mlock);
	if (last_drain == 0) {
		last_drain = net_uptime();
		lck_mtx_unlock(mbuf_mlock);
		return;
	}
	interval = net_uptime() - last_drain;
	if (interval <= mb_drain_maxint) {
		lck_mtx_unlock(mbuf_mlock);
		return;
	}
	if (interval <= mb_drain_maxint * 5)
		purge_caches = TRUE;
	last_drain = net_uptime();
	/*
	 * Don't free any memory if we're using 60% or more.
	 */
	for (mc = 0; mc < NELEM(mbuf_table); mc++) {
		total_mem += m_total(mc) * m_maxsize(mc);
		use_mem += m_active(mc) * m_maxsize(mc);
	}
	per = (use_mem * 100) / total_mem;
	if (per >= 60) {
		lck_mtx_unlock(mbuf_mlock);
		return;
	}
	/*
	 * Purge all the caches.  This effectively disables
	 * caching for a few seconds, but the mbuf worker thread will
	 * re-enable them again.
	 */
	if (purge_caches == TRUE)
		for (mc = 0; mc < NELEM(mbuf_table); mc++) {
			if (m_total(mc) < m_avgtotal(mc))
				continue;
			lck_mtx_unlock(mbuf_mlock);
			ret = mcache_purge_cache(m_cache(mc), FALSE);
			lck_mtx_lock(mbuf_mlock);
			if (ret == TRUE)
				m_purge_cnt(mc)++;
		}
	/*
	 * Move the objects from the composite class freelist to
	 * the rudimentary slabs list, but keep at least 10% of the average
	 * total in the freelist.
	 */
	for (mc = 0; mc < NELEM(mbuf_table); mc++) {
		while (m_cobjlist(mc) &&
		    m_total(mc) < m_avgtotal(mc) &&
		    m_infree(mc) > 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
			obj = m_cobjlist(mc);
			m_cobjlist(mc) = obj->obj_next;
			obj->obj_next = NULL;
			num = cslab_free(mc, obj, 1);
			VERIFY(num == 1);
			m_free_cnt(mc)++;
			m_infree(mc)--;
			/* cslab_free() handles m_total */
		}
	}
	/*
	 * Free the buffers present in the slab list up to 10% of the total
	 * average per class.
	 *
	 * We walk the list backwards in an attempt to reduce fragmentation.
	 */
	for (mc = NELEM(mbuf_table) - 1; (int)mc >= 0; mc--) {
		TAILQ_FOREACH_SAFE(sp, &m_slablist(mc), sl_link, sp_tmp) {
			/*
			 * Process only unused slabs occupying memory.
			 */
			if (sp->sl_refcnt != 0 || sp->sl_len == 0 ||
			    sp->sl_base == NULL)
				continue;
			if (m_total(mc) < m_avgtotal(mc) ||
			    m_infree(mc) < 0.1 * m_avgtotal(mc) + m_minlimit(mc))
				break;
			slab_remove(sp, mc);
			switch (mc) {
			case MC_MBUF:
				m_infree(mc) -= NMBPG;
				m_total(mc) -= NMBPG;
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, NMBPG);
				break;
			case MC_CL:
				m_infree(mc) -= NCLPG;
				m_total(mc) -= NCLPG;
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, NMBPG);
				break;
			case MC_BIGCL:
				m_infree(mc) -= NBCLPG;
				m_total(mc) -= NBCLPG;
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, NMBPG);
				break;
			case MC_16KCL:
				m_infree(mc)--;
				m_total(mc)--;
				for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
					nsp = nsp->sl_next;
					VERIFY(nsp->sl_refcnt == 0 &&
					    nsp->sl_base != NULL &&
					    nsp->sl_len == 0);
					slab_init(nsp, 0, 0, NULL, NULL, 0, 0,
					    0);
					nsp->sl_flags = 0;
				}
				if (mclaudit != NULL)
					mcl_audit_free(sp->sl_base, 1);
				break;
			default:
				/*
				 * The composite classes have their own
				 * freelist (m_cobjlist), so we only
				 * process rudimentary classes here.
				 */
				VERIFY(0);
			}
			m_release_cnt(mc) += m_size(mc);
			released += m_size(mc);
			VERIFY(sp->sl_base != NULL &&
			    sp->sl_len >= PAGE_SIZE);
			offset = MTOPG(sp->sl_base);
			/*
			 * Make sure the IOMapper points to a valid, but
			 * bogus, address.  This should prevent further DMA
			 * accesses to freed memory.
			 */
			IOMapperInsertPage(mcl_paddr_base, offset, scratch_pa);
			mcl_paddr[offset] = 0;
			kmem_free(mb_map, (vm_offset_t)sp->sl_base,
			    sp->sl_len);
			slab_init(sp, 0, 0, NULL, NULL, 0, 0, 0);
			sp->sl_flags = 0;
		}
	}
	mbstat.m_drain++;
	mbstat.m_bigclusters = m_total(MC_BIGCL);
	mbstat.m_clusters = m_total(MC_CL);
	mbstat.m_mbufs = m_total(MC_MBUF);
	mbuf_stat_sync();
	mbuf_mtypes_sync(TRUE);
	lck_mtx_unlock(mbuf_mlock);
}
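
/*
 * To summarize the gating above: a drain pass is skipped when draining is
 * disabled (mb_drain_maxint == 0), when threads are waiting for mbufs, when
 * fewer than mb_drain_maxint seconds have passed since the previous pass,
 * or when 60% or more of the mbuf memory is in use; the caches are purged
 * in addition only if the previous pass was within 5 * mb_drain_maxint
 * seconds.
 */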
static int
m_drain_force_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int val = 0, err;

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL)
		return (err);
	if (val)
		m_drain();

	return (err);
}
SYSCTL_DECL(_kern_ipc);
SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mbstat_sysctl, "S,mbstat", "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_stat,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mb_stat_sysctl, "S,mb_stat", "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_top_trace,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mleak_top_trace_sysctl, "S,mb_top_trace", "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_table,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, mleak_table_sysctl, "S,mleak_table", "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mleak_sample_factor,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mleak_table.mleak_sample_factor, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_normalized,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mb_normalized, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_watchdog,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mb_watchdog, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_drain_force,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    m_drain_force_sysctl, "I",
    "Forces the mbuf garbage collection to run");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_drain_maxint,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mb_drain_maxint, 0,
    "Minimum time interval between garbage collection");
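
/*
 * Example (illustrative): the integer knobs declared above can be read or
 * tuned from a privileged shell, e.g.
 *
 *	sysctl kern.ipc.mleak_sample_factor
 *	sysctl -w kern.ipc.mb_drain_force=1
 *	sysctl -w kern.ipc.mb_drain_maxint=60
 *
 * Writing to mb_drain_force is simply a trigger handled by
 * m_drain_force_sysctl(); the structure-typed nodes (mbstat, mb_stat,
 * mleak_top_trace, mleak_table) export binary records intended for
 * monitoring tools rather than human-readable text.
 */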