1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65 #include <zone_debug.h>
66 #include <zone_alias_addr.h>
67
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <mach_debug/zone_info.h>
75 #include <mach/vm_map.h>
76
77 #include <kern/kern_types.h>
78 #include <kern/assert.h>
79 #include <kern/host.h>
80 #include <kern/macro_help.h>
81 #include <kern/sched.h>
82 #include <kern/locks.h>
83 #include <kern/sched_prim.h>
84 #include <kern/misc_protos.h>
85 #include <kern/thread_call.h>
86 #include <kern/zalloc.h>
87 #include <kern/kalloc.h>
88
89 #include <vm/pmap.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_page.h>
93
94 #include <pexpert/pexpert.h>
95
96 #include <machine/machparam.h>
97
98 #include <libkern/OSDebug.h>
99 #include <libkern/OSAtomic.h>
100 #include <sys/kdebug.h>
101
102 /*
103 * Zone Corruption Debugging
104 *
105 * We use three methods to detect use of a zone element after it has been freed. These
106 * checks are enabled for every N'th element (counted per-zone) by specifying
107 * "zp-factor=N" as a boot-arg. To turn this feature off, set "zp-factor=0" or "-no-zp".
108 *
109 * (1) Range-check the free-list "next" pointer for sanity.
110 * (2) Store the pointer in two different words, one at the beginning of the freed element
111 * and one at the end, and compare them against each other when re-using the element,
112 * to detect modifications.
113 * (3) Poison the freed memory by overwriting it with 0xdeadbeef, and check it when the
114 * memory is being reused to make sure it is still poisoned.
115 *
116 * As a result, each element (that is large enough to hold this data inside) must be marked
117 * as either "ZP_POISONED" or "ZP_NOT_POISONED" in the first integer within the would-be
118 * poisoned segment after the first free-list pointer.
119 *
120 * The performance slowdown is inversely proportional to N (as would be expected):
121 * roughly a 4-5% hit at N=1, down to ~0.3% at N=16, and just "noise" at N=32 and
122 * higher. You can expect to find a 100% reproducible bug in an average of N tries,
123 * with a standard deviation of about N, but you will probably want to set
124 * "zp-factor=1" or "-zp" if you are attempting to reproduce a known bug.
125 *
126 *
127 * Zone corruption logging
128 *
129 * You can also track where corruptions come from by using the boot-arguments:
130 * "zlog=<zone name to log> -zc". Search for "Zone corruption logging" later in this
131 * document for more implementation and usage information.
132 */
133
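/*
 * Illustrative layout of a freed element when the checks fire (derived from
 * free_to_zone()/alloc_from_zone() below); offsets assume the element is large
 * enough to hold both pointers plus the marker word:
 *
 *   offset 0                            free-list "next" pointer
 *   offset sizeof(vm_offset_t)          ZP_POISONED / ZP_NOT_POISONED marker (uint32_t)
 *   ...                                 remaining words filled with ZP_POISON
 *   offset elem_size - sizeof(vm_offset_t)   backup copy of the "next" pointer
 */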
134 #define ZP_POISON 0xdeadbeef
135 #define ZP_POISONED 0xfeedface
136 #define ZP_NOT_POISONED 0xbaddecaf
137
138 #if CONFIG_EMBEDDED
139 #define ZP_DEFAULT_SAMPLING_FACTOR 0
140 #else /* CONFIG_EMBEDDED */
141 #define ZP_DEFAULT_SAMPLING_FACTOR 16
142 #endif /* CONFIG_EMBEDDED */
143
144 uint32_t free_check_sample_factor = 0; /* set by zp-factor=N boot arg */
145 boolean_t corruption_debug_flag = FALSE; /* enabled by "-zc" boot-arg */
146
147 /*
148 * Zone checking helper macro: accepts NULL or a 4-byte-aligned address at or above vm_min_kernel_address.
149 */
150 #define is_kernel_data_addr(a) (!(a) || ((a) >= vm_min_kernel_address && !((a) & 0x3)))
151
152 /*
153 * Frees the specified element, which is within the specified zone. If this
154 * element should be poisoned and its free list checker should be set, both are
155 * done here. These checks will only be enabled if the element size is at least
156 * large enough to hold two vm_offset_t's and one uint32_t (to enable both types
157 * of checks).
158 */
159 static inline void
160 free_to_zone(zone_t zone, void *elem) {
161 /* get the index of the first uint32_t beyond the 'next' pointer */
162 unsigned int i = sizeof(vm_offset_t) / sizeof(uint32_t);
163
164 /* should we run checks on this piece of memory? */
165 if (free_check_sample_factor != 0 &&
166 zone->free_check_count++ % free_check_sample_factor == 0 &&
167 zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
168 zone->free_check_count = 1;
169 ((uint32_t *) elem)[i] = ZP_POISONED;
170 for (i++; i < zone->elem_size / sizeof(uint32_t); i++) {
171 ((uint32_t *) elem)[i] = ZP_POISON;
172 }
173 ((vm_offset_t *) elem)[((zone->elem_size)/sizeof(vm_offset_t))-1] = zone->free_elements;
174 } else {
175 ((uint32_t *) elem)[i] = ZP_NOT_POISONED;
176 }
177
178 /* maintain free list and decrement number of active objects in zone */
179 ((vm_offset_t *) elem)[0] = zone->free_elements;
180 zone->free_elements = (vm_offset_t) elem;
181 zone->count--;
182 }
183
184 /*
185 * Allocates an element from the specified zone, storing its address in the
186 * return arg. This function will look for corruptions revealed through zone
187 * poisoning and free list checks.
188 */
189 static inline void
190 alloc_from_zone(zone_t zone, void **ret) {
191 void *elem = (void *) zone->free_elements;
192 if (elem != NULL) {
193 /* get the index of the first uint32_t beyond the 'next' pointer */
194 unsigned int i = sizeof(vm_offset_t) / sizeof(uint32_t);
195
196 /* first int in data section must be ZP_POISONED or ZP_NOT_POISONED */
197 if (((uint32_t *) elem)[i] == ZP_POISONED &&
198 zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
199 /* check the free list pointers */
200 if (!is_kernel_data_addr(((vm_offset_t *) elem)[0]) ||
201 ((vm_offset_t *) elem)[0] !=
202 ((vm_offset_t *) elem)[(zone->elem_size/sizeof(vm_offset_t))-1]) {
203 panic("a freed zone element has been modified in zone: %s",
204 zone->zone_name);
205 }
206
207 /* check for poisoning in free space */
208 for (i++;
209 i < zone->elem_size / sizeof(uint32_t) -
210 sizeof(vm_offset_t) / sizeof(uint32_t);
211 i++) {
212 if (((uint32_t *) elem)[i] != ZP_POISON) {
213 panic("a freed zone element has been modified in zone: %s",
214 zone->zone_name);
215 }
216 }
217 } else if (((uint32_t *) elem)[i] != ZP_NOT_POISONED) {
218 panic("a freed zone element has been modified in zone: %s",
219 zone->zone_name);
220 }
221
222 zone->count++;
223 zone->sum_count++;
224 zone->free_elements = ((vm_offset_t *) elem)[0];
225 }
226 *ret = elem;
227 }
228
229
230 /*
231 * Fake zones for things that want to report via zprint but are not actually zones.
232 */
233 struct fake_zone_info {
234 const char* name;
235 void (*init)(int);
236 void (*query)(int *,
237 vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
238 uint64_t *, int *, int *, int *);
239 };
240
241 static const struct fake_zone_info fake_zones[] = {
242 {
243 .name = "kernel_stacks",
244 .init = stack_fake_zone_init,
245 .query = stack_fake_zone_info,
246 },
247 {
248 .name = "page_tables",
249 .init = pt_fake_zone_init,
250 .query = pt_fake_zone_info,
251 },
252 {
253 .name = "kalloc.large",
254 .init = kalloc_fake_zone_init,
255 .query = kalloc_fake_zone_info,
256 },
257 };
258 static const unsigned int num_fake_zones =
259 sizeof (fake_zones) / sizeof (fake_zones[0]);
260
261 /*
262 * Zone info options
263 */
264 boolean_t zinfo_per_task = FALSE; /* enabled by -zinfop in boot-args */
265 #define ZINFO_SLOTS 200 /* for now */
266 #define ZONES_MAX (ZINFO_SLOTS - num_fake_zones - 1)
267
268 /*
269 * Support for garbage collection of unused zone pages
270 *
271 * The kernel virtually allocates the "zone map" submap of the kernel
272 * map. When an individual zone needs more storage, memory is allocated
273 * out of the zone map, and the two-level "zone_page_table" is
274 * on-demand expanded so that it has entries for those pages.
275 * zone_page_init()/zone_page_alloc() initialize "alloc_count"
276 * to the number of zone elements that occupy the zone page (which may
277 * be as few as 1, e.g. when a single zone element spans multiple
278 * pages).
279 *
280 * Asynchronously, the zone_gc() logic attempts to walk zone free
281 * lists to see if all the elements on a zone page are free. If
282 * "collect_count" (which it increments during the scan) matches
283 * "alloc_count", the zone page is a candidate for collection and the
284 * physical page is returned to the VM system. During this process, the
285 * first word of the zone page is re-used to maintain a linked list of
286 * to-be-collected zone pages.
287 */
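/*
 * Illustrative lookup path for the two-level table (see the
 * zone_page_table_first_level_slot()/zone_page_table_second_level_slot()
 * macros below): a zone page index is split as
 *
 *   first-level slot  = pindex >> zone_page_table_second_level_shift_amount
 *   second-level slot = pindex &  (zone_page_table_second_level_size - 1)
 *
 * so each first-level slot points at a dynamically allocated array of
 * zone_page_table_entry structures covering a power-of-two run of pages.
 */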
288 typedef uint32_t zone_page_index_t;
289 #define ZONE_PAGE_INDEX_INVALID ((zone_page_index_t)0xFFFFFFFFU)
290
291 struct zone_page_table_entry {
292 volatile uint16_t alloc_count;
293 volatile uint16_t collect_count;
294 };
295
296 #define ZONE_PAGE_USED 0
297 #define ZONE_PAGE_UNUSED 0xffff
298
299 /* Forwards */
300 void zone_page_init(
301 vm_offset_t addr,
302 vm_size_t size);
303
304 void zone_page_alloc(
305 vm_offset_t addr,
306 vm_size_t size);
307
308 void zone_page_free_element(
309 zone_page_index_t *free_page_head,
310 zone_page_index_t *free_page_tail,
311 vm_offset_t addr,
312 vm_size_t size);
313
314 void zone_page_collect(
315 vm_offset_t addr,
316 vm_size_t size);
317
318 boolean_t zone_page_collectable(
319 vm_offset_t addr,
320 vm_size_t size);
321
322 void zone_page_keep(
323 vm_offset_t addr,
324 vm_size_t size);
325
326 void zalloc_async(
327 thread_call_param_t p0,
328 thread_call_param_t p1);
329
330 void zone_display_zprint( void );
331
332 vm_map_t zone_map = VM_MAP_NULL;
333
334 zone_t zone_zone = ZONE_NULL; /* the zone containing other zones */
335
336 zone_t zinfo_zone = ZONE_NULL; /* zone of per-task zone info */
337
338 /*
339 * The VM system gives us an initial chunk of memory.
340 * It has to be big enough to allocate the zone_zone
341 * all the way through the pmap zone.
342 */
343
344 vm_offset_t zdata;
345 vm_size_t zdata_size;
346
347 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
348 #define zone_sleep(zone) \
349 (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);
350
351
352 #define lock_zone_init(zone) \
353 MACRO_BEGIN \
354 char _name[32]; \
355 (void) snprintf(_name, sizeof (_name), "zone.%s", (zone)->zone_name); \
356 lck_grp_attr_setdefault(&(zone)->lock_grp_attr); \
357 lck_grp_init(&(zone)->lock_grp, _name, &(zone)->lock_grp_attr); \
358 lck_attr_setdefault(&(zone)->lock_attr); \
359 lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \
360 &(zone)->lock_grp, &(zone)->lock_attr); \
361 MACRO_END
362
363 #define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
364
365 /*
366 * Garbage collection map information
367 */
368 #define ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE (32)
369 struct zone_page_table_entry * volatile zone_page_table[ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE];
370 vm_size_t zone_page_table_used_size;
371 vm_offset_t zone_map_min_address;
372 vm_offset_t zone_map_max_address;
373 unsigned int zone_pages;
374 unsigned int zone_page_table_second_level_size; /* power of 2 */
375 unsigned int zone_page_table_second_level_shift_amount;
376
377 #define zone_page_table_first_level_slot(x) ((x) >> zone_page_table_second_level_shift_amount)
378 #define zone_page_table_second_level_slot(x) ((x) & (zone_page_table_second_level_size - 1))
379
380 void zone_page_table_expand(zone_page_index_t pindex);
381 struct zone_page_table_entry *zone_page_table_lookup(zone_page_index_t pindex);
382
383 /*
384 * Exclude more than one concurrent garbage collection
385 */
386 decl_lck_mtx_data(, zone_gc_lock)
387
388 lck_attr_t zone_lck_attr;
389 lck_grp_t zone_lck_grp;
390 lck_grp_attr_t zone_lck_grp_attr;
391 lck_mtx_ext_t zone_lck_ext;
392
393 #if !ZONE_ALIAS_ADDR
394 #define from_zone_map(addr, size) \
395 ((vm_offset_t)(addr) >= zone_map_min_address && \
396 ((vm_offset_t)(addr) + size -1) < zone_map_max_address)
397 #else
398 #define from_zone_map(addr, size) \
399 ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) >= zone_map_min_address && \
400 ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) + size -1) < zone_map_max_address)
401 #endif
402
403 /*
404 * Protects first_zone, last_zone, num_zones,
405 * and the next_zone field of zones.
406 */
407 decl_simple_lock_data(, all_zones_lock)
408 zone_t first_zone;
409 zone_t *last_zone;
410 unsigned int num_zones;
411
412 boolean_t zone_gc_allowed = TRUE;
413 boolean_t zone_gc_forced = FALSE;
414 boolean_t panic_include_zprint = FALSE;
415 boolean_t zone_gc_allowed_by_time_throttle = TRUE;
416
417 /*
418 * Zone leak debugging code
419 *
420 * When enabled, this code keeps a log to track allocations to a particular zone that have not
421 * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
422 * only when logging is enabled, so there is no effect on the system when it's turned off. Logging is
423 * off by default.
424 *
425 * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
426 * is the name of the zone you wish to log.
427 *
428 * This code only tracks one zone, so you need to identify which one is leaking first.
429 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
430 * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
431 * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
432 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
433 * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs.
434 * See the help in the kgmacros for usage info.
435 *
436 *
437 * Zone corruption logging
438 *
439 * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
440 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
441 * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
442 * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
443 * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
444 * corrupted to examine its history. This should lead to the source of the corruption.
445 */
446
447 static int log_records; /* size of the log, expressed in number of records */
448
449 #define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */
450
451 static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */
452
453 /*
454 * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to
455 * the number of records you want in the log. For example, "zrecs=1000" sets it to 1000 records. Note
456 * that the larger the size of the log, the slower the system will run due to linear searching in the log,
457 * but one doesn't generally care about performance when tracking down a leak. The log is capped at 8000
458 * records since going much larger than this tends to make the system unresponsive and unbootable on small
459 * memory configurations. The default value is 4000 records.
460 */
461
462 #if defined(__LP64__)
463 #define ZRECORDS_MAX 128000 /* Max records allowed in the log */
464 #else
465 #define ZRECORDS_MAX 8000 /* Max records allowed in the log */
466 #endif
467 #define ZRECORDS_DEFAULT 4000 /* default records in log if zrecs is not specified in boot-args */
468
469 /*
470 * Each record in the log contains a pointer to the zone element it refers to, a "time" number that allows
471 * the records to be ordered chronologically, and a small array to hold the pc's from the stack trace. A
472 * record is added to the log each time a zalloc() is done in the zone_of_interest. For leak debugging,
473 * the record is cleared when a zfree() is done. For corruption debugging, the log tracks both allocs and frees.
474 * If the log fills, old records are replaced as if it were a circular buffer.
475 */
476
477 struct zrecord {
478 void *z_element; /* the element that was zalloc'ed or zfree'ed */
479 uint32_t z_opcode:1, /* whether it was a zalloc or zfree */
480 z_time:31; /* time index when operation was done */
481 void *z_pc[MAX_ZTRACE_DEPTH]; /* stack trace of caller */
482 };
483
484 /*
485 * Opcodes for the z_opcode field:
486 */
487
488 #define ZOP_ALLOC 1
489 #define ZOP_FREE 0
490
491 /*
492 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
493 */
494
495 static struct zrecord *zrecords; /* the log itself, dynamically allocated when logging is enabled */
496 static int zcurrent = 0; /* index of the next slot in the log to use */
497 static int zrecorded = 0; /* number of allocations recorded in the log */
498 static unsigned int ztime = 0; /* a timestamp of sorts */
499 static zone_t zone_of_interest = NULL; /* the zone being watched; corresponds to zone_name_to_log */
500
501 /*
502 * Decide if we want to log this zone by doing a string compare between a zone name and the name
503 * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
504 * possible to include spaces in strings passed in via the boot-args, a period in the logname will
505 * match a space in the zone name.
506 */
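/*
 * Example (with an illustrative zone name): a zone named "vm map entries"
 * would be requested as "zlog=vm.map.entries" in boot-args, since spaces
 * cannot be passed there.
 */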
507
508 static int
509 log_this_zone(const char *zonename, const char *logname)
510 {
511 int len;
512 const char *zc = zonename;
513 const char *lc = logname;
514
515 /*
516 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
517 */
518
519 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
520
521 /*
522 * If the current characters don't match, check for a space in
523 * the zone name and a corresponding period in the log name.
524 * If that's not there, then the strings don't match.
525 */
526
527 if (*zc != *lc && !(*zc == ' ' && *lc == '.'))
528 break;
529
530 /*
531 * The strings are equal so far. If we're at the end, then it's a match.
532 */
533
534 if (*zc == '\0')
535 return TRUE;
536 }
537
538 return FALSE;
539 }
540
541
542 /*
543 * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and
544 * the buffer for the records has been allocated.
545 */
546
547 #define DO_LOGGING(z) (zrecords && (z) == zone_of_interest)
548
549 extern boolean_t zlog_ready;
550
551 #if CONFIG_ZLEAKS
552 #pragma mark -
553 #pragma mark Zone Leak Detection
554
555 /*
556 * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
557 * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a
558 * backtrace. On every free, we check whether the allocation was being tracked and,
559 * if so, stop tracking it.
560 *
561 * We track the allocations in the zallocations hash table, which stores the address that was returned from
562 * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which
563 * stores the backtrace associated with that allocation. This provides uniquing for the relatively large
564 * backtraces - we don't store them more than once.
565 *
566 * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
567 * a large amount of virtual space.
568 */
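/*
 * Rough data-flow sketch (see struct zallocation below and zleak_log()):
 *
 *   zallocations[hashaddr(addr, zleak_alloc_buckets)]
 *       .za_element      = address returned by the zone allocator
 *       .za_size         = size of that allocation
 *       .za_trace_index ---> ztraces[hashbacktrace(bt, ...)], which holds the
 *                            unique backtrace and the total outstanding bytes
 *                            attributed to it (zt_size).
 */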
569 #define ZLEAK_STATE_ENABLED 0x01 /* Zone leak monitoring should be turned on if zone_map fills up. */
570 #define ZLEAK_STATE_ACTIVE 0x02 /* We are actively collecting traces. */
571 #define ZLEAK_STATE_ACTIVATING 0x04 /* Some thread is doing setup; others should move along. */
572 #define ZLEAK_STATE_FAILED 0x08 /* Attempt to allocate tables failed. We will not try again. */
573 uint32_t zleak_state = 0; /* State of collection, as above */
574
575 boolean_t panic_include_ztrace = FALSE; /* Enable zleak logging on panic */
576 vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */
577 vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */
578 unsigned int zleak_sample_factor = 1000; /* Allocations per sample attempt */
579
580 /*
581 * Counters for allocation statistics.
582 */
583
584 /* Times two active records want to occupy the same spot */
585 unsigned int z_alloc_collisions = 0;
586 unsigned int z_trace_collisions = 0;
587
588 /* Times a new record lands on a spot previously occupied by a freed allocation */
589 unsigned int z_alloc_overwrites = 0;
590 unsigned int z_trace_overwrites = 0;
591
592 /* Times a new alloc or trace is put into the hash table */
593 unsigned int z_alloc_recorded = 0;
594 unsigned int z_trace_recorded = 0;
595
596 /* Times zleak_log returned false due to not being able to acquire the lock */
597 unsigned int z_total_conflicts = 0;
598
599
600 #pragma mark struct zallocation
601 /*
602 * Structure for keeping track of an allocation
603 * An allocation bucket is in use if its element is not NULL
604 */
605 struct zallocation {
606 uintptr_t za_element; /* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */
607 vm_size_t za_size; /* how much memory did this allocation take up? */
608 uint32_t za_trace_index; /* index into ztraces for backtrace associated with allocation */
609 /* TODO: #if this out */
610 uint32_t za_hit_count; /* for determining effectiveness of hash function */
611 };
612
613 /* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
614 uint32_t zleak_alloc_buckets = CONFIG_ZLEAK_ALLOCATION_MAP_NUM;
615 uint32_t zleak_trace_buckets = CONFIG_ZLEAK_TRACE_MAP_NUM;
616
617 vm_size_t zleak_max_zonemap_size;
618
619 /* Hashmaps of allocations and their corresponding traces */
620 static struct zallocation* zallocations;
621 static struct ztrace* ztraces;
622
623 /* not static so that panic can see this, see kern/debug.c */
624 struct ztrace* top_ztrace;
625
626 /* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
627 static lck_spin_t zleak_lock;
628 static lck_attr_t zleak_lock_attr;
629 static lck_grp_t zleak_lock_grp;
630 static lck_grp_attr_t zleak_lock_grp_attr;
631
632 /*
633 * Initializes the zone leak monitor. Called from zone_init()
634 */
635 static void
636 zleak_init(vm_size_t max_zonemap_size)
637 {
638 char scratch_buf[16];
639 boolean_t zleak_enable_flag = FALSE;
640
641 zleak_max_zonemap_size = max_zonemap_size;
642 zleak_global_tracking_threshold = max_zonemap_size / 2;
643 zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8;
644
645 #if CONFIG_EMBEDDED
646 if (PE_parse_boot_argn("-zleakon", scratch_buf, sizeof(scratch_buf))) {
647 zleak_enable_flag = TRUE;
648 printf("zone leak detection enabled\n");
649 } else {
650 zleak_enable_flag = FALSE;
651 printf("zone leak detection disabled\n");
652 }
653 #else /* CONFIG_EMBEDDED */
654 /* -zleakoff (flag to disable zone leak monitor) */
655 if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) {
656 zleak_enable_flag = FALSE;
657 printf("zone leak detection disabled\n");
658 } else {
659 zleak_enable_flag = TRUE;
660 printf("zone leak detection enabled\n");
661 }
662 #endif /* CONFIG_EMBEDDED */
663
664 /* zfactor=XXXX (override how often to sample the zone allocator) */
665 if (PE_parse_boot_argn("zfactor", &zleak_sample_factor, sizeof(zleak_sample_factor))) {
666 printf("Zone leak factor override:%u\n", zleak_sample_factor);
667 }
668
669 /* zleak-allocs=XXXX (override number of buckets in zallocations) */
670 if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) {
671 printf("Zone leak alloc buckets override:%u\n", zleak_alloc_buckets);
672 /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
673 if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets-1))) {
674 printf("Override isn't a power of two, bad things might happen!");
675 }
676 }
677
678 /* zleak-traces=XXXX (override number of buckets in ztraces) */
679 if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) {
680 printf("Zone leak trace buckets override:%u\n", zleak_trace_buckets);
681 /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
682 if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets-1))) {
683 printf("Override isn't a power of two, bad things might happen!");
684 }
685 }
686
687 /* allocate the zleak_lock */
688 lck_grp_attr_setdefault(&zleak_lock_grp_attr);
689 lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr);
690 lck_attr_setdefault(&zleak_lock_attr);
691 lck_spin_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr);
692
693 if (zleak_enable_flag) {
694 zleak_state = ZLEAK_STATE_ENABLED;
695 }
696 }
697
698 #if CONFIG_ZLEAKS
699
700 /*
701 * Support for kern.zleak.active sysctl - a simplified
702 * version of the zleak_state variable.
703 */
704 int
705 get_zleak_state(void)
706 {
707 if (zleak_state & ZLEAK_STATE_FAILED)
708 return (-1);
709 if (zleak_state & ZLEAK_STATE_ACTIVE)
710 return (1);
711 return (0);
712 }
713
714 #endif
715
716
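/*
 * Allocate and zero the zallocations/ztraces hash tables, install them, and
 * mark zleak collection active. Returns KERN_SUCCESS immediately if another
 * thread is already activating (or has activated, or has failed); on
 * allocation failure the ZLEAK_STATE_FAILED bit is set so we never retry.
 */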
717 kern_return_t
718 zleak_activate(void)
719 {
720 kern_return_t retval;
721 vm_size_t z_alloc_size = zleak_alloc_buckets * sizeof(struct zallocation);
722 vm_size_t z_trace_size = zleak_trace_buckets * sizeof(struct ztrace);
723 void *allocations_ptr = NULL;
724 void *traces_ptr = NULL;
725
726 /* Only one thread attempts to activate at a time */
727 if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
728 return KERN_SUCCESS;
729 }
730
731 /* Indicate that we're doing the setup */
732 lck_spin_lock(&zleak_lock);
733 if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
734 lck_spin_unlock(&zleak_lock);
735 return KERN_SUCCESS;
736 }
737
738 zleak_state |= ZLEAK_STATE_ACTIVATING;
739 lck_spin_unlock(&zleak_lock);
740
741 /* Allocate and zero tables */
742 retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size);
743 if (retval != KERN_SUCCESS) {
744 goto fail;
745 }
746
747 retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&traces_ptr, z_trace_size);
748 if (retval != KERN_SUCCESS) {
749 goto fail;
750 }
751
752 bzero(allocations_ptr, z_alloc_size);
753 bzero(traces_ptr, z_trace_size);
754
755 /* Everything's set. Install tables, mark active. */
756 zallocations = allocations_ptr;
757 ztraces = traces_ptr;
758
759 /*
760 * Initialize the top_ztrace to the first entry in ztraces,
761 * so we don't have to check for null in zleak_log
762 */
763 top_ztrace = &ztraces[0];
764
765 /*
766 * Note that we do need a barrier between installing
767 * the tables and setting the active flag, because the zfree()
768 * path accesses the table without a lock if we're active.
769 */
770 lck_spin_lock(&zleak_lock);
771 zleak_state |= ZLEAK_STATE_ACTIVE;
772 zleak_state &= ~ZLEAK_STATE_ACTIVATING;
773 lck_spin_unlock(&zleak_lock);
774
775 return 0;
776
777 fail:
778 /*
779 * If we fail to allocate memory, don't further tax
780 * the system by trying again.
781 */
782 lck_spin_lock(&zleak_lock);
783 zleak_state |= ZLEAK_STATE_FAILED;
784 zleak_state &= ~ZLEAK_STATE_ACTIVATING;
785 lck_spin_unlock(&zleak_lock);
786
787 if (allocations_ptr != NULL) {
788 kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size);
789 }
790
791 if (traces_ptr != NULL) {
792 kmem_free(kernel_map, (vm_offset_t)traces_ptr, z_trace_size);
793 }
794
795 return retval;
796 }
797
798 /*
799 * TODO: What about allocations that never get deallocated,
800 * especially ones with unique backtraces? Should we wait to record
801 * until after boot has completed?
802 * (How many persistent zallocs are there?)
803 */
804
805 /*
806 * This function records the allocation in the allocations table,
807 * and stores the associated backtrace in the traces table
808 * (or just increments the refcount if the trace is already recorded).
809 * If the allocation slot is already in use, the old allocation record is simply replaced,
810 * and the size it contributed is subtracted from its associated trace's refcount.
811 * If the trace slot is occupied by a different backtrace with the same hash, we bail.
812 * A trace's refcount is incremented by the amount of memory its allocations consume.
813 * The return value indicates whether to try again next time.
814 */
815 static boolean_t
816 zleak_log(uintptr_t* bt,
817 uintptr_t addr,
818 uint32_t depth,
819 vm_size_t allocation_size)
820 {
821 /* Quit if there's someone else modifying the hash tables */
822 if (!lck_spin_try_lock(&zleak_lock)) {
823 z_total_conflicts++;
824 return FALSE;
825 }
826
827 struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
828
829 uint32_t trace_index = hashbacktrace(bt, depth, zleak_trace_buckets);
830 struct ztrace* trace = &ztraces[trace_index];
831
832 allocation->za_hit_count++;
833 trace->zt_hit_count++;
834
835 /*
836 * If the allocation bucket we want to be in is occupied, and if the occupier
837 * has the same trace as us, just bail.
838 */
839 if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) {
840 z_alloc_collisions++;
841
842 lck_spin_unlock(&zleak_lock);
843 return TRUE;
844 }
845
846 /* STEP 1: Store the backtrace in the traces array. */
847 /* A size of zero indicates that the trace bucket is free. */
848
849 if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0 ) {
850 /*
851 * Different unique trace with same hash!
852 * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
853 * and get out of the way for later chances
854 */
855 trace->zt_collisions++;
856 z_trace_collisions++;
857
858 lck_spin_unlock(&zleak_lock);
859 return TRUE;
860 } else if (trace->zt_size > 0) {
861 /* Same trace, already added, so increment refcount */
862 trace->zt_size += allocation_size;
863 } else {
864 /* Found an unused trace bucket, record the trace here! */
865 if (trace->zt_depth != 0) /* if this slot was previously used but not currently in use */
866 z_trace_overwrites++;
867
868 z_trace_recorded++;
869 trace->zt_size = allocation_size;
870 memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t)) );
871
872 trace->zt_depth = depth;
873 trace->zt_collisions = 0;
874 }
875
876 /* STEP 2: Store the allocation record in the allocations array. */
877
878 if (allocation->za_element != (uintptr_t) 0) {
879 /*
880 * Straight up replace any allocation record that was there. We don't want to do the work
881 * to preserve the allocation entries that were there, because we only record a subset of the
882 * allocations anyways.
883 */
884
885 z_alloc_collisions++;
886
887 struct ztrace* associated_trace = &ztraces[allocation->za_trace_index];
888 /* Knock off old allocation's size, not the new allocation */
889 associated_trace->zt_size -= allocation->za_size;
890 } else if (allocation->za_trace_index != 0) {
891 /* Slot previously used but not currently in use */
892 z_alloc_overwrites++;
893 }
894
895 allocation->za_element = addr;
896 allocation->za_trace_index = trace_index;
897 allocation->za_size = allocation_size;
898
899 z_alloc_recorded++;
900
901 if (top_ztrace->zt_size < trace->zt_size)
902 top_ztrace = trace;
903
904 lck_spin_unlock(&zleak_lock);
905 return TRUE;
906 }
907
908 /*
909 * Free the allocation record and release the stacktrace.
910 * This should be as fast as possible because it will be called for every free.
911 */
912 static void
913 zleak_free(uintptr_t addr,
914 vm_size_t allocation_size)
915 {
916 if (addr == (uintptr_t) 0)
917 return;
918
919 struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
920
921 /* Double-checked locking: check to find out if we're interested, lock, check to make
922 * sure it hasn't changed, then modify it, and release the lock.
923 */
924
925 if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
926 /* if the allocation was the one, grab the lock, check again, then delete it */
927 lck_spin_lock(&zleak_lock);
928
929 if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
930 struct ztrace *trace;
931
932 /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
933 if (allocation->za_size != allocation_size) {
934 panic("Freeing as size %lu memory that was allocated with size %lu\n",
935 (uintptr_t)allocation_size, (uintptr_t)allocation->za_size);
936 }
937
938 trace = &ztraces[allocation->za_trace_index];
939
940 /* size of 0 indicates trace bucket is unused */
941 if (trace->zt_size > 0) {
942 trace->zt_size -= allocation_size;
943 }
944
945 /* A NULL element means the allocation bucket is unused */
946 allocation->za_element = 0;
947 }
948 lck_spin_unlock(&zleak_lock);
949 }
950 }
951
952 #endif /* CONFIG_ZLEAKS */
953
954 /* These functions are outside of CONFIG_ZLEAKS because they are also used in
955 * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix.
956 */
957
958 /*
959 * This function captures a backtrace from the current stack and
960 * returns the number of frames captured, limited by max_frames.
961 * It's fast because it does no checking to make sure there isn't bad data.
962 * Since it's only called from threads that we're going to keep executing,
963 * if there's bad data we were going to die eventually anyway.
964 * If this function is inlined, it doesn't record the frame of the function it's
965 * inside (because there's no stack frame!).
966 */
967
968 uint32_t
969 fastbacktrace(uintptr_t* bt, uint32_t max_frames)
970 {
971 uintptr_t* frameptr = NULL, *frameptr_next = NULL;
972 uintptr_t retaddr = 0;
973 uint32_t frame_index = 0, frames = 0;
974 uintptr_t kstackb, kstackt;
975 thread_t cthread = current_thread();
976
977 if (__improbable(cthread == NULL))
978 return 0;
979
980 kstackb = cthread->kernel_stack;
981 kstackt = kstackb + kernel_stack_size;
982 /* Load stack frame pointer (EBP on x86) into frameptr */
983 frameptr = __builtin_frame_address(0);
984
985 while (frameptr != NULL && frame_index < max_frames ) {
986 /* Next frame pointer is pointed to by the previous one */
987 frameptr_next = (uintptr_t*) *frameptr;
988
989 /* Bail if we see a zero in the stack frame, that means we've reached the top of the stack */
990 /* That also means the return address is worthless, so don't record it */
991 if (frameptr_next == NULL)
992 break;
993 /* Verify thread stack bounds */
994 if (((uintptr_t)frameptr_next > kstackt) || ((uintptr_t)frameptr_next < kstackb))
995 break;
996 /* Pull return address from one spot above the frame pointer */
997 retaddr = *(frameptr + 1);
998
999 /* Store it in the backtrace array */
1000 bt[frame_index++] = retaddr;
1001
1002 frameptr = frameptr_next;
1003 }
1004
1005 /* Save the number of frames captured for return value */
1006 frames = frame_index;
1007
1008 /* Fill in the rest of the backtrace with zeros */
1009 while (frame_index < max_frames)
1010 bt[frame_index++] = 0;
1011
1012 return frames;
1013 }
1014
1015 /* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
1016 uintptr_t
1017 hash_mix(uintptr_t x)
1018 {
1019 #ifndef __LP64__
1020 x += ~(x << 15);
1021 x ^= (x >> 10);
1022 x += (x << 3 );
1023 x ^= (x >> 6 );
1024 x += ~(x << 11);
1025 x ^= (x >> 16);
1026 #else
1027 x += ~(x << 32);
1028 x ^= (x >> 22);
1029 x += ~(x << 13);
1030 x ^= (x >> 8 );
1031 x += (x << 3 );
1032 x ^= (x >> 15);
1033 x += ~(x << 27);
1034 x ^= (x >> 31);
1035 #endif
1036 return x;
1037 }
1038
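/*
 * Hash a backtrace of 'depth' frames down to an index in [0, max_size).
 * max_size must be a power of two (the result is masked, not reduced mod).
 */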
1039 uint32_t
1040 hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
1041 {
1042
1043 uintptr_t hash = 0;
1044 uintptr_t mask = max_size - 1;
1045
1046 while (depth) {
1047 hash += bt[--depth];
1048 }
1049
1050 hash = hash_mix(hash) & mask;
1051
1052 assert(hash < max_size);
1053
1054 return (uint32_t) hash;
1055 }
1056
1057 /*
1058 * TODO: Determine how well distributed this is
1059 * max_size must be a power of 2, e.g. 0x10000, because 0x10000-1 is 0x0FFFF, which makes a good bitmask
1060 */
1061 uint32_t
1062 hashaddr(uintptr_t pt, uint32_t max_size)
1063 {
1064 uintptr_t hash = 0;
1065 uintptr_t mask = max_size - 1;
1066
1067 hash = hash_mix(pt) & mask;
1068
1069 assert(hash < max_size);
1070
1071 return (uint32_t) hash;
1072 }
1073
1074 /* End of all leak-detection code */
1075 #pragma mark -
1076
1077 /*
1078 * zinit initializes a new zone. The zone data structures themselves
1079 * are stored in a zone, which is initially a static structure that
1080 * is initialized by zone_init.
1081 */
1082 zone_t
1083 zinit(
1084 vm_size_t size, /* the size of an element */
1085 vm_size_t max, /* maximum memory to use */
1086 vm_size_t alloc, /* allocation size */
1087 const char *name) /* a name for the zone */
1088 {
1089 zone_t z;
1090
1091 if (zone_zone == ZONE_NULL) {
1092
1093 z = (struct zone *)zdata;
1094 zdata += sizeof(*z);
1095 zdata_size -= sizeof(*z);
1096 } else
1097 z = (zone_t) zalloc(zone_zone);
1098
1099 if (z == ZONE_NULL)
1100 return(ZONE_NULL);
1101
1102 /*
1103 * Round off all the parameters appropriately.
1104 */
1105 if (size < sizeof(z->free_elements))
1106 size = sizeof(z->free_elements);
1107 size = ((size-1) + sizeof(z->free_elements)) -
1108 ((size-1) % sizeof(z->free_elements));
1109 if (alloc == 0)
1110 alloc = PAGE_SIZE;
1111 alloc = round_page(alloc);
1112 max = round_page(max);
1113 /*
1114 * We look for an allocation size with less than 1% waste
1115 * up to 5 pages in size...
1116 * otherwise, we look for the allocation size with the least fragmentation
1117 * in the range of 1 - 5 pages.
1118 * That size will be used unless
1119 * the user's suggestion is larger AND has less fragmentation.
1120 */
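/*
 * Worked example (illustrative, assuming 4KB pages): for a 1536-byte element,
 * 1 page wastes 1024 bytes (25%) and 2 pages waste 512 bytes, but 3 pages
 * (12288 bytes) hold exactly 8 elements with 0% waste, so alloc becomes
 * 3 * PAGE_SIZE.
 */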
1121 #if ZONE_ALIAS_ADDR
1122 if ((size < PAGE_SIZE) && (PAGE_SIZE % size <= PAGE_SIZE / 10))
1123 alloc = PAGE_SIZE;
1124 else
1125 #endif
1126 #if defined(__LP64__)
1127 if (((alloc % size) != 0) || (alloc > PAGE_SIZE * 8))
1128 #endif
1129 {
1130 vm_size_t best, waste; unsigned int i;
1131 best = PAGE_SIZE;
1132 waste = best % size;
1133
1134 for (i = 1; i <= 5; i++) {
1135 vm_size_t tsize, twaste;
1136
1137 tsize = i * PAGE_SIZE;
1138
1139 if ((tsize % size) < (tsize / 100)) {
1140 alloc = tsize;
1141 goto use_this_allocation;
1142 }
1143 twaste = tsize % size;
1144 if (twaste < waste)
1145 best = tsize, waste = twaste;
1146 }
1147 if (alloc <= best || (alloc % size >= waste))
1148 alloc = best;
1149 }
1150 use_this_allocation:
1151 if (max && (max < alloc))
1152 max = alloc;
1153
1154 z->free_elements = 0;
1155 z->cur_size = 0;
1156 z->max_size = max;
1157 z->elem_size = size;
1158 z->alloc_size = alloc;
1159 z->zone_name = name;
1160 z->count = 0;
1161 z->sum_count = 0LL;
1162 z->doing_alloc = FALSE;
1163 z->doing_gc = FALSE;
1164 z->exhaustible = FALSE;
1165 z->collectable = TRUE;
1166 z->allows_foreign = FALSE;
1167 z->expandable = TRUE;
1168 z->waiting = FALSE;
1169 z->async_pending = FALSE;
1170 z->caller_acct = TRUE;
1171 z->noencrypt = FALSE;
1172 z->no_callout = FALSE;
1173 z->async_prio_refill = FALSE;
1174 z->gzalloc_exempt = FALSE;
1175 z->alignment_required = FALSE;
1176 z->prio_refill_watermark = 0;
1177 z->zone_replenish_thread = NULL;
1178 #if CONFIG_ZLEAKS
1179 z->num_allocs = 0;
1180 z->num_frees = 0;
1181 z->zleak_capture = 0;
1182 z->zleak_on = FALSE;
1183 #endif /* CONFIG_ZLEAKS */
1184
1185 #if ZONE_DEBUG
1186 z->active_zones.next = z->active_zones.prev = NULL;
1187 zone_debug_enable(z);
1188 #endif /* ZONE_DEBUG */
1189 lock_zone_init(z);
1190
1191 /*
1192 * Add the zone to the all-zones list.
1193 * If we are tracking zone info per task, and we have
1194 * already used all the available stat slots, then keep
1195 * using the overflow zone slot.
1196 */
1197 z->next_zone = ZONE_NULL;
1198 thread_call_setup(&z->call_async_alloc, zalloc_async, z);
1199 simple_lock(&all_zones_lock);
1200 *last_zone = z;
1201 last_zone = &z->next_zone;
1202 z->index = num_zones;
1203 if (zinfo_per_task) {
1204 if (num_zones > ZONES_MAX)
1205 z->index = ZONES_MAX;
1206 }
1207 num_zones++;
1208 simple_unlock(&all_zones_lock);
1209
1210 /*
1211 * Check if we should be logging this zone. If so, remember the zone pointer.
1212 */
1213 if (log_this_zone(z->zone_name, zone_name_to_log)) {
1214 zone_of_interest = z;
1215 }
1216
1217 /*
1218 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
1219 * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. zlog_ready is set to
1220 * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one
1221 * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
1222 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
1223 * right now.
1224 */
1225 if (zone_of_interest != NULL && zrecords == NULL && zlog_ready) {
1226 if (kmem_alloc(kernel_map, (vm_offset_t *)&zrecords, log_records * sizeof(struct zrecord)) == KERN_SUCCESS) {
1227
1228 /*
1229 * We got the memory for the log. Zero it out since the code needs this to identify unused records.
1230 * At this point, everything is set up and we're ready to start logging this zone.
1231 */
1232
1233 bzero((void *)zrecords, log_records * sizeof(struct zrecord));
1234 printf("zone: logging started for zone %s (%p)\n", zone_of_interest->zone_name, zone_of_interest);
1235
1236 } else {
1237 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
1238 zone_of_interest = NULL;
1239 }
1240 }
1241 #if CONFIG_GZALLOC
1242 gzalloc_zone_init(z);
1243 #endif
1244 return(z);
1245 }
1246 unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated;
1247
1248 static void zone_replenish_thread(zone_t);
1249
1250 /* High priority VM privileged thread used to asynchronously refill a designated
1251 * zone, such as the reserved VM map entry zone.
1252 */
1253 static void zone_replenish_thread(zone_t z) {
1254 vm_size_t free_size;
1255 current_thread()->options |= TH_OPT_VMPRIV;
1256
1257 for (;;) {
1258 lock_zone(z);
1259 assert(z->prio_refill_watermark != 0);
1260 while ((free_size = (z->cur_size - (z->count * z->elem_size))) < (z->prio_refill_watermark * z->elem_size)) {
1261 assert(z->doing_alloc == FALSE);
1262 assert(z->async_prio_refill == TRUE);
1263
1264 unlock_zone(z);
1265 int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
1266 vm_offset_t space, alloc_size;
1267 kern_return_t kr;
1268
1269 if (vm_pool_low())
1270 alloc_size = round_page(z->elem_size);
1271 else
1272 alloc_size = z->alloc_size;
1273
1274 if (z->noencrypt)
1275 zflags |= KMA_NOENCRYPT;
1276
1277 kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags);
1278
1279 if (kr == KERN_SUCCESS) {
1280 #if ZONE_ALIAS_ADDR
1281 if (alloc_size == PAGE_SIZE)
1282 space = zone_alias_addr(space);
1283 #endif
1284 zcram(z, space, alloc_size);
1285 } else if (kr == KERN_RESOURCE_SHORTAGE) {
1286 VM_PAGE_WAIT();
1287 } else if (kr == KERN_NO_SPACE) {
1288 kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags);
1289 if (kr == KERN_SUCCESS) {
1290 #if ZONE_ALIAS_ADDR
1291 if (alloc_size == PAGE_SIZE)
1292 space = zone_alias_addr(space);
1293 #endif
1294 zcram(z, space, alloc_size);
1295 } else {
1296 assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC);
1297 thread_block(THREAD_CONTINUE_NULL);
1298 }
1299 }
1300
1301 lock_zone(z);
1302 zone_replenish_loops++;
1303 }
1304
1305 unlock_zone(z);
1306 assert_wait(&z->zone_replenish_thread, THREAD_UNINT);
1307 thread_block(THREAD_CONTINUE_NULL);
1308 zone_replenish_wakeups++;
1309 }
1310 }
1311
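/*
 * Mark the zone for asynchronous priority refill and start its dedicated
 * replenish thread, which keeps at least low_water_mark elements' worth of
 * free space available (see zone_replenish_thread() above).
 */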
1312 void
1313 zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) {
1314 z->prio_refill_watermark = low_water_mark;
1315
1316 z->async_prio_refill = TRUE;
1317 OSMemoryBarrier();
1318 kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread);
1319
1320 if (tres != KERN_SUCCESS) {
1321 panic("zone_prio_refill_configure, thread create: 0x%x", tres);
1322 }
1323
1324 thread_deallocate(z->zone_replenish_thread);
1325 }
1326
1327 /*
1328 * Cram the given memory into the specified zone.
1329 */
1330 void
1331 zcram(
1332 zone_t zone,
1333 vm_offset_t newmem,
1334 vm_size_t size)
1335 {
1336 vm_size_t elem_size;
1337 boolean_t from_zm = FALSE;
1338
1339 /* Basic sanity checks */
1340 assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
1341 assert(!zone->collectable || zone->allows_foreign
1342 || (from_zone_map(newmem, size)));
1343
1344 elem_size = zone->elem_size;
1345
1346 if (from_zone_map(newmem, size))
1347 from_zm = TRUE;
1348
1349 if (from_zm)
1350 zone_page_init(newmem, size);
1351
1352 lock_zone(zone);
1353 while (size >= elem_size) {
1354 free_to_zone(zone, (void *) newmem);
1355 if (from_zm)
1356 zone_page_alloc(newmem, elem_size);
1357 zone->count++; /* compensate for free_to_zone */
1358 size -= elem_size;
1359 newmem += elem_size;
1360 zone->cur_size += elem_size;
1361 }
1362 unlock_zone(zone);
1363 }
1364
1365
1366 /*
1367 * Steal memory for the zone package. Called from
1368 * vm_page_bootstrap().
1369 */
1370 void
1371 zone_steal_memory(void)
1372 {
1373 #if CONFIG_GZALLOC
1374 gzalloc_configure();
1375 #endif
1376 /* Request enough early memory to get to the pmap zone */
1377 zdata_size = 12 * sizeof(struct zone);
1378 zdata = (vm_offset_t)pmap_steal_memory(round_page(zdata_size));
1379 }
1380
1381
1382 /*
1383 * Fill a zone with enough memory to contain at least nelem elements.
1384 * Memory is obtained with kmem_alloc_kobject from the kernel_map.
1385 * Return the number of elements actually put into the zone, which may
1386 * be more than the caller asked for since the memory allocation is
1387 * rounded up to a full page.
1388 */
1389 int
1390 zfill(
1391 zone_t zone,
1392 int nelem)
1393 {
1394 kern_return_t kr;
1395 vm_size_t size;
1396 vm_offset_t memory;
1397 int nalloc;
1398
1399 assert(nelem > 0);
1400 if (nelem <= 0)
1401 return 0;
1402 size = nelem * zone->elem_size;
1403 size = round_page(size);
1404 kr = kmem_alloc_kobject(kernel_map, &memory, size);
1405 if (kr != KERN_SUCCESS)
1406 return 0;
1407
1408 zone_change(zone, Z_FOREIGN, TRUE);
1409 zcram(zone, memory, size);
1410 nalloc = (int)(size / zone->elem_size);
1411 assert(nalloc >= nelem);
1412
1413 return nalloc;
1414 }
1415
1416 /*
1417 * Initialize the "zone of zones" which uses fixed memory allocated
1418 * earlier in memory initialization. zone_bootstrap is called
1419 * before zone_init.
1420 */
1421 void
1422 zone_bootstrap(void)
1423 {
1424 char temp_buf[16];
1425
1426 if (PE_parse_boot_argn("-zinfop", temp_buf, sizeof(temp_buf))) {
1427 zinfo_per_task = TRUE;
1428 }
1429
1430 /* do we want corruption-style debugging with zlog? */
1431 if (PE_parse_boot_argn("-zc", temp_buf, sizeof(temp_buf))) {
1432 corruption_debug_flag = TRUE;
1433 }
1434
1435 /* Set up zone poisoning */
1436
1437 free_check_sample_factor = ZP_DEFAULT_SAMPLING_FACTOR;
1438
1439 /* support for old zone poisoning boot-args */
1440 if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) {
1441 free_check_sample_factor = 1;
1442 }
1443 if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) {
1444 free_check_sample_factor = 0;
1445 }
1446
1447 /* zp-factor=XXXX (override how often to poison freed zone elements) */
1448 if (PE_parse_boot_argn("zp-factor", &free_check_sample_factor, sizeof(free_check_sample_factor))) {
1449 printf("Zone poisoning factor override:%u\n", free_check_sample_factor);
1450 }
1451
1452 /*
1453 * Check for and set up zone leak logging if requested via boot-args. We recognize two
1454 * boot-args:
1455 *
1456 * zlog=<zone_to_log>
1457 * zrecs=<num_records_in_log>
1458 *
1459 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
1460 * control the size of the log. If zrecs is not specified, a default value is used.
1461 */
1462
1463 if (PE_parse_boot_argn("zlog", zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) {
1464 if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) {
1465
1466 /*
1467 * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
1468 * This prevents accidentally hogging too much kernel memory and making the system
1469 * unusable.
1470 */
1471
1472 log_records = MIN(ZRECORDS_MAX, log_records);
1473
1474 } else {
1475 log_records = ZRECORDS_DEFAULT;
1476 }
1477 }
1478
1479 simple_lock_init(&all_zones_lock, 0);
1480
1481 first_zone = ZONE_NULL;
1482 last_zone = &first_zone;
1483 num_zones = 0;
1484
1485 /* assertion: nobody else called zinit before us */
1486 assert(zone_zone == ZONE_NULL);
1487 zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
1488 sizeof(struct zone), "zones");
1489 zone_change(zone_zone, Z_COLLECT, FALSE);
1490 zone_change(zone_zone, Z_CALLERACCT, FALSE);
1491 zone_change(zone_zone, Z_NOENCRYPT, TRUE);
1492
1493 zcram(zone_zone, zdata, zdata_size);
1494
1495 /* initialize fake zones and zone info if tracking by task */
1496 if (zinfo_per_task) {
1497 vm_size_t zisize = sizeof(zinfo_usage_store_t) * ZINFO_SLOTS;
1498 unsigned int i;
1499
1500 for (i = 0; i < num_fake_zones; i++)
1501 fake_zones[i].init(ZINFO_SLOTS - num_fake_zones + i);
1502 zinfo_zone = zinit(zisize, zisize * CONFIG_TASK_MAX,
1503 zisize, "per task zinfo");
1504 zone_change(zinfo_zone, Z_CALLERACCT, FALSE);
1505 }
1506 }
1507
1508 void
1509 zinfo_task_init(task_t task)
1510 {
1511 if (zinfo_per_task) {
1512 task->tkm_zinfo = zalloc(zinfo_zone);
1513 memset(task->tkm_zinfo, 0, sizeof(zinfo_usage_store_t) * ZINFO_SLOTS);
1514 } else {
1515 task->tkm_zinfo = NULL;
1516 }
1517 }
1518
1519 void
1520 zinfo_task_free(task_t task)
1521 {
1522 assert(task != kernel_task);
1523 if (task->tkm_zinfo != NULL) {
1524 zfree(zinfo_zone, task->tkm_zinfo);
1525 task->tkm_zinfo = NULL;
1526 }
1527 }
1528
1529 void
1530 zone_init(
1531 vm_size_t max_zonemap_size)
1532 {
1533 kern_return_t retval;
1534 vm_offset_t zone_min;
1535 vm_offset_t zone_max;
1536
1537 retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
1538 FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT,
1539 &zone_map);
1540
1541 if (retval != KERN_SUCCESS)
1542 panic("zone_init: kmem_suballoc failed");
1543 zone_max = zone_min + round_page(max_zonemap_size);
1544 #if CONFIG_GZALLOC
1545 gzalloc_init(max_zonemap_size);
1546 #endif
1547 /*
1548 * Setup garbage collection information:
1549 */
1550 zone_map_min_address = zone_min;
1551 zone_map_max_address = zone_max;
1552
1553 zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
1554 zone_page_table_used_size = sizeof(zone_page_table);
1555
1556 zone_page_table_second_level_size = 1;
1557 zone_page_table_second_level_shift_amount = 0;
1558
1559 /*
1560 * Find the power of 2 for the second level that allows
1561 * the first level to fit in ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
1562 * slots.
1563 */
1564 while ((zone_page_table_first_level_slot(zone_pages-1)) >= ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE) {
1565 zone_page_table_second_level_size <<= 1;
1566 zone_page_table_second_level_shift_amount++;
1567 }
1568
1569 lck_grp_attr_setdefault(&zone_lck_grp_attr);
1570 lck_grp_init(&zone_lck_grp, "zones", &zone_lck_grp_attr);
1571 lck_attr_setdefault(&zone_lck_attr);
1572 lck_mtx_init_ext(&zone_gc_lock, &zone_lck_ext, &zone_lck_grp, &zone_lck_attr);
1573
1574 #if CONFIG_ZLEAKS
1575 /*
1576 * Initialize the zone leak monitor
1577 */
1578 zleak_init(max_zonemap_size);
1579 #endif /* CONFIG_ZLEAKS */
1580 }
1581
1582 void
1583 zone_page_table_expand(zone_page_index_t pindex)
1584 {
1585 unsigned int first_index;
1586 struct zone_page_table_entry * volatile * first_level_ptr;
1587
1588 assert(pindex < zone_pages);
1589
1590 first_index = zone_page_table_first_level_slot(pindex);
1591 first_level_ptr = &zone_page_table[first_index];
1592
1593 if (*first_level_ptr == NULL) {
1594 /*
1595 * We were able to verify the old first-level slot
1596 * had NULL, so attempt to populate it.
1597 */
1598
1599 vm_offset_t second_level_array = 0;
1600 vm_size_t second_level_size = round_page(zone_page_table_second_level_size * sizeof(struct zone_page_table_entry));
1601 zone_page_index_t i;
1602 struct zone_page_table_entry *entry_array;
1603
1604 if (kmem_alloc_kobject(zone_map, &second_level_array,
1605 second_level_size) != KERN_SUCCESS) {
1606 panic("zone_page_table_expand");
1607 }
1608
1609 /*
1610 * zone_gc() may scan the "zone_page_table" directly,
1611 * so make sure any slots have a valid unused state.
1612 */
1613 entry_array = (struct zone_page_table_entry *)second_level_array;
1614 for (i=0; i < zone_page_table_second_level_size; i++) {
1615 entry_array[i].alloc_count = ZONE_PAGE_UNUSED;
1616 entry_array[i].collect_count = 0;
1617 }
1618
1619 if (OSCompareAndSwapPtr(NULL, entry_array, first_level_ptr)) {
1620 /* Old slot was NULL, replaced with expanded level */
1621 OSAddAtomicLong(second_level_size, &zone_page_table_used_size);
1622 } else {
1623 /* Old slot was not NULL, someone else expanded first */
1624 kmem_free(zone_map, second_level_array, second_level_size);
1625 }
1626 } else {
1627 /* Old slot was not NULL, already been expanded */
1628 }
1629 }
1630
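/*
 * Return the zone_page_table entry for the given zone page index, or NULL if
 * the covering second-level block has not been allocated yet.
 */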
1631 struct zone_page_table_entry *
1632 zone_page_table_lookup(zone_page_index_t pindex)
1633 {
1634 unsigned int first_index = zone_page_table_first_level_slot(pindex);
1635 struct zone_page_table_entry *second_level = zone_page_table[first_index];
1636
1637 if (second_level) {
1638 return &second_level[zone_page_table_second_level_slot(pindex)];
1639 }
1640
1641 return NULL;
1642 }
1643
1644 extern volatile SInt32 kfree_nop_count;
1645
1646 #pragma mark -
1647 #pragma mark zalloc_canblock
1648
1649 /*
1650 * zalloc returns an element from the specified zone.
1651 */
1652 void *
1653 zalloc_canblock(
1654 register zone_t zone,
1655 boolean_t canblock)
1656 {
1657 vm_offset_t addr = 0;
1658 kern_return_t retval;
1659 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */
1660 int numsaved = 0;
1661 int i;
1662 boolean_t zone_replenish_wakeup = FALSE;
1663 boolean_t did_gzalloc;
1664
1665 did_gzalloc = FALSE;
1666 #if CONFIG_ZLEAKS
1667 uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
1668 #endif /* CONFIG_ZLEAKS */
1669
1670 assert(zone != ZONE_NULL);
1671
1672 #if CONFIG_GZALLOC
1673 addr = gzalloc_alloc(zone, canblock);
1674 did_gzalloc = (addr != 0);
1675 #endif
1676
1677 lock_zone(zone);
1678
1679 /*
1680 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
1681 */
1682
1683 if (DO_LOGGING(zone))
1684 numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH);
1685
1686 #if CONFIG_ZLEAKS
1687 /*
1688 * Zone leak detection: capture a backtrace every zleak_sample_factor
1689 * allocations in this zone.
1690 */
1691 if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
1692 zone->zleak_capture = 1;
1693
1694 /* Avoid backtracing twice if zone logging is on */
1695 if (numsaved == 0 )
1696 zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
1697 else
1698 zleak_tracedepth = numsaved;
1699 }
1700 #endif /* CONFIG_ZLEAKS */
1701
1702 if (__probable(addr == 0))
1703 alloc_from_zone(zone, (void **) &addr);
1704
1705 if (zone->async_prio_refill &&
1706 ((zone->cur_size - (zone->count * zone->elem_size)) <
1707 (zone->prio_refill_watermark * zone->elem_size))) {
1708 zone_replenish_wakeup = TRUE;
1709 zone_replenish_wakeups_initiated++;
1710 }
1711
1712 while ((addr == 0) && canblock) {
1713 /*
1714 * If nothing was there, try to get more
1715 */
1716 if (zone->doing_alloc) {
1717 /*
1718 * Someone is allocating memory for this zone.
1719 * Wait for it to show up, then try again.
1720 */
1721 zone->waiting = TRUE;
1722 zone_sleep(zone);
1723 } else if (zone->doing_gc) {
1724 /* zone_gc() is running. Since we need an element
1725 * from the free list that is currently being
1726 * collected, set the waiting bit and try to
1727 * interrupt the GC process, and try again
1728 * when we obtain the lock.
1729 */
1730 zone->waiting = TRUE;
1731 zone_sleep(zone);
1732 } else {
1733 vm_offset_t space;
1734 vm_size_t alloc_size;
1735 int retry = 0;
1736
1737 if ((zone->cur_size + zone->elem_size) >
1738 zone->max_size) {
1739 if (zone->exhaustible)
1740 break;
1741 if (zone->expandable) {
1742 /*
1743 * We're willing to overflow certain
1744 * zones, but not without complaining.
1745 *
1746 * This is best used in conjunction
1747 * with the collectable flag. What we
1748 * want is an assurance we can get the
1749 * memory back, assuming there's no
1750 * leak.
1751 */
1752 zone->max_size += (zone->max_size >> 1);
1753 } else {
1754 unlock_zone(zone);
1755
1756 panic_include_zprint = TRUE;
1757 #if CONFIG_ZLEAKS
1758 if (zleak_state & ZLEAK_STATE_ACTIVE)
1759 panic_include_ztrace = TRUE;
1760 #endif /* CONFIG_ZLEAKS */
1761 panic("zalloc: zone \"%s\" empty.", zone->zone_name);
1762 }
1763 }
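/*
 * Note on the expandable case above (simple arithmetic): the limit grows
 * by 50% each time it is reached, e.g. a 4 MB max_size becomes 6 MB,
 * then 9 MB, and so on.
 */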
1764 zone->doing_alloc = TRUE;
1765 unlock_zone(zone);
1766
1767 for (;;) {
1768 int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
1769
1770 if (vm_pool_low() || retry >= 1)
1771 alloc_size =
1772 round_page(zone->elem_size);
1773 else
1774 alloc_size = zone->alloc_size;
1775
1776 if (zone->noencrypt)
1777 zflags |= KMA_NOENCRYPT;
1778
1779 retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags);
1780 if (retval == KERN_SUCCESS) {
1781 #if ZONE_ALIAS_ADDR
1782 if (alloc_size == PAGE_SIZE)
1783 space = zone_alias_addr(space);
1784 #endif
1785
1786 #if CONFIG_ZLEAKS
1787 if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) {
1788 if (zone_map->size >= zleak_global_tracking_threshold) {
1789 kern_return_t kr;
1790
1791 kr = zleak_activate();
1792 if (kr != KERN_SUCCESS) {
1793 printf("Failed to activate live zone leak debugging (%d).\n", kr);
1794 }
1795 }
1796 }
1797
1798 if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
1799 if (zone->cur_size > zleak_per_zone_tracking_threshold) {
1800 zone->zleak_on = TRUE;
1801 }
1802 }
1803 #endif /* CONFIG_ZLEAKS */
1804
1805 zcram(zone, space, alloc_size);
1806
1807 break;
1808 } else if (retval != KERN_RESOURCE_SHORTAGE) {
1809 retry++;
1810
1811 if (retry == 2) {
1812 zone_gc(TRUE);
1813 printf("zalloc did gc\n");
1814 zone_display_zprint();
1815 }
1816 if (retry == 3) {
1817 panic_include_zprint = TRUE;
1818 #if CONFIG_ZLEAKS
1819 if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
1820 panic_include_ztrace = TRUE;
1821 }
1822 #endif /* CONFIG_ZLEAKS */
1823 /* TODO: Change this to something more descriptive, perhaps
1824 * 'zone_map exhausted' only if we get retval 3 (KERN_NO_SPACE).
1825 */
1826 panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
1827 }
1828 } else {
1829 break;
1830 }
1831 }
1832 lock_zone(zone);
1833 zone->doing_alloc = FALSE;
1834 if (zone->waiting) {
1835 zone->waiting = FALSE;
1836 zone_wakeup(zone);
1837 }
1838 alloc_from_zone(zone, (void **) &addr);
1839 if (addr == 0 &&
1840 retval == KERN_RESOURCE_SHORTAGE) {
1841 unlock_zone(zone);
1842
1843 VM_PAGE_WAIT();
1844 lock_zone(zone);
1845 }
1846 }
1847 if (addr == 0)
1848 alloc_from_zone(zone, (void **) &addr);
1849 }
1850
1851 #if CONFIG_ZLEAKS
1852 /* Zone leak detection:
1853 * If we're sampling this allocation, add it to the zleaks hash table.
1854 */
1855 if (addr && zleak_tracedepth > 0) {
1856 /* Sampling can fail if another sample is happening at the same time in a different zone. */
1857 if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
1858 /* If it failed, roll back the counter so we sample the next allocation instead. */
1859 zone->zleak_capture = zleak_sample_factor;
1860 }
1861 }
1862 #endif /* CONFIG_ZLEAKS */
1863
1864
1865 /*
1866 * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
1867 * suspected, so this code rarely executes. We need to run it while still holding the zone lock,
1868 * since the lock protects the various log-related data structures.
1869 */
1870
1871 if (DO_LOGGING(zone) && addr) {
1872
1873 /*
1874 * Look for a place to record this new allocation. We implement two different logging strategies
1875 * depending on whether we're looking for the source of a zone leak or a zone corruption. When looking
1876 * for a leak, we want to log as many allocations as possible in order to clearly identify the leaker
1877 * among all the records. So we look for an unused slot in the log and fill that in before overwriting
1878 * an old entry. When looking for a corruption however, it's better to have a chronological log of all
1879 * the allocations and frees done in the zone so that the history of operations for a specific zone
1880 * element can be inspected. So in this case, we treat the log as a circular buffer and overwrite the
1881 * oldest entry whenever a new one needs to be added.
1882 *
1883 * The corruption_debug_flag tells us what style of logging to do. It's set if we're supposed to be
1884 * doing corruption-style logging (indicated via -zc in the boot-args).
1885 */
1886
1887 if (!corruption_debug_flag && zrecords[zcurrent].z_element && zrecorded < log_records) {
1888
1889 /*
1890 * If we get here, we're doing leak-style logging and there are still some unused entries in
1891 * the log (since zrecorded is smaller than the size of the log). Look for an unused slot
1892 * starting at zcurrent and wrap around if we reach the end of the buffer. If the buffer
1893 * is already full, we just fall through and overwrite the element indexed by zcurrent.
1894 */
1895
1896 for (i = zcurrent; i < log_records; i++) {
1897 if (zrecords[i].z_element == NULL) {
1898 zcurrent = i;
1899 goto empty_slot;
1900 }
1901 }
1902
1903 for (i = 0; i < zcurrent; i++) {
1904 if (zrecords[i].z_element == NULL) {
1905 zcurrent = i;
1906 goto empty_slot;
1907 }
1908 }
1909 }
1910
1911 /*
1912 * Save a record of this allocation
1913 */
1914
1915 empty_slot:
1916 if (zrecords[zcurrent].z_element == NULL)
1917 zrecorded++;
1918
1919 zrecords[zcurrent].z_element = (void *)addr;
1920 zrecords[zcurrent].z_time = ztime++;
1921 zrecords[zcurrent].z_opcode = ZOP_ALLOC;
1922
1923 for (i = 0; i < numsaved; i++)
1924 zrecords[zcurrent].z_pc[i] = (void*) zbt[i];
1925
1926 for (; i < MAX_ZTRACE_DEPTH; i++)
1927 zrecords[zcurrent].z_pc[i] = 0;
1928
1929 zcurrent++;
1930
1931 if (zcurrent >= log_records)
1932 zcurrent = 0;
1933 }
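/*
 * Editorial note on the two logging styles above: the -zc boot-arg
 * (mentioned in the comment) selects the corruption-style, chronological
 * log; without it the log is used in leak style. The zone being logged
 * and the number of records are also chosen by boot-args parsed near the
 * log declarations earlier in this file (believed to be zlog= and zrecs=
 * in this version).
 */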
1934
1935 if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
1936 zone->async_pending = TRUE;
1937 unlock_zone(zone);
1938 thread_call_enter(&zone->call_async_alloc);
1939 lock_zone(zone);
1940 alloc_from_zone(zone, (void **) &addr);
1941 }
1942
1943 #if ZONE_DEBUG
1944 if (!did_gzalloc && addr && zone_debug_enabled(zone)) {
1945 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
1946 addr += ZONE_DEBUG_OFFSET;
1947 }
1948 #endif
1949
1950 #if CONFIG_ZLEAKS
1951 if (addr != 0) {
1952 zone->num_allocs++;
1953 }
1954 #endif /* CONFIG_ZLEAKS */
1955
1956 unlock_zone(zone);
1957
1958 if (zone_replenish_wakeup)
1959 thread_wakeup(&zone->zone_replenish_thread);
1960
1961 TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
1962
1963 if (addr) {
1964 thread_t thr = current_thread();
1965 task_t task;
1966 zinfo_usage_t zinfo;
1967 vm_size_t sz = zone->elem_size;
1968
1969 if (zone->caller_acct)
1970 ledger_credit(thr->t_ledger, task_ledgers.tkm_private, sz);
1971 else
1972 ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, sz);
1973
1974 if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
1975 OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].alloc);
1976 }
1977 return((void *)addr);
1978 }
1979
1980
1981 void *
1982 zalloc(
1983 register zone_t zone)
1984 {
1985 return( zalloc_canblock(zone, TRUE) );
1986 }
1987
1988 void *
1989 zalloc_noblock(
1990 register zone_t zone)
1991 {
1992 return( zalloc_canblock(zone, FALSE) );
1993 }
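/*
 * A minimal usage sketch (illustrative only; "struct my_thing" and
 * "my_zone" are hypothetical).  zinit() takes the element size, the
 * maximum zone size, the allocation chunk size and a name; both it and
 * zone_change() are declared in kern/zalloc.h:
 *
 *	static zone_t my_zone;
 *
 *	void
 *	my_subsystem_init(void)
 *	{
 *		my_zone = zinit(sizeof (struct my_thing),
 *				1024 * sizeof (struct my_thing),
 *				PAGE_SIZE, "my things");
 *		zone_change(my_zone, Z_NOENCRYPT, TRUE);
 *	}
 *
 *	struct my_thing *t = (struct my_thing *) zalloc(my_zone);
 *	...
 *	zfree(my_zone, t);
 */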
1994
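/*
 * zalloc_async is the thread_call target that zalloc_canblock() queues
 * (via call_async_alloc) when a non-blocking allocation comes up empty:
 * it performs one blocking allocation, which grows the zone as needed,
 * immediately frees the element back, and then clears async_pending so
 * a later non-blocking attempt can find memory waiting for it.
 */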
1995 void
1996 zalloc_async(
1997 thread_call_param_t p0,
1998 __unused thread_call_param_t p1)
1999 {
2000 void *elt;
2001
2002 elt = zalloc_canblock((zone_t)p0, TRUE);
2003 zfree((zone_t)p0, elt);
2004 lock_zone(((zone_t)p0));
2005 ((zone_t)p0)->async_pending = FALSE;
2006 unlock_zone(((zone_t)p0));
2007 }
2008
2009 /*
2010 * zget returns an element from the specified zone
2011 * and immediately returns nothing if there is nothing there.
2012 *
2013 * This form should be used when you cannot block (like when
2014 * processing an interrupt).
2015 *
2016 * XXX: It seems like only vm_page_grab_fictitious_common uses this, and its
2017 * friend vm_page_more_fictitious can block, so it doesn't seem like
2018 * this is used for interrupts any more....
2019 */
2020 void *
2021 zget(
2022 register zone_t zone)
2023 {
2024 vm_offset_t addr;
2025
2026 #if CONFIG_ZLEAKS
2027 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used for zone leak detection */
2028 uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
2029 #endif /* CONFIG_ZLEAKS */
2030
2031 assert( zone != ZONE_NULL );
2032
2033 if (!lock_try_zone(zone))
2034 return NULL;
2035
2036 #if CONFIG_ZLEAKS
2037 /*
2038 * Zone leak detection: capture a backtrace
2039 */
2040 if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
2041 zone->zleak_capture = 1;
2042 zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
2043 }
2044 #endif /* CONFIG_ZLEAKS */
2045
2046 alloc_from_zone(zone, (void **) &addr);
2047 #if ZONE_DEBUG
2048 if (addr && zone_debug_enabled(zone)) {
2049 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
2050 addr += ZONE_DEBUG_OFFSET;
2051 }
2052 #endif /* ZONE_DEBUG */
2053
2054 #if CONFIG_ZLEAKS
2055 /*
2056 * Zone leak detection: record the allocation
2057 */
2058 if (zone->zleak_on && zleak_tracedepth > 0 && addr) {
2059 /* Sampling can fail if another sample is happening at the same time in a different zone. */
2060 if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
2061 /* If it failed, roll back the counter so we sample the next allocation instead. */
2062 zone->zleak_capture = zleak_sample_factor;
2063 }
2064 }
2065
2066 if (addr != 0) {
2067 zone->num_allocs++;
2068 }
2069 #endif /* CONFIG_ZLEAKS */
2070
2071 unlock_zone(zone);
2072
2073 return((void *) addr);
2074 }
2075
2076 /* Keep this FALSE by default. Large memory machines run orders of magnitude
2077 slower in debug mode when true. Use the debugger to enable if needed. */
2078 /* static */ boolean_t zone_check = FALSE;
2079
2080 static zone_t zone_last_bogus_zone = ZONE_NULL;
2081 static vm_offset_t zone_last_bogus_elem = 0;
2082
2083 void
2084 zfree(
2085 register zone_t zone,
2086 void *addr)
2087 {
2088 vm_offset_t elem = (vm_offset_t) addr;
2089 void *zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */
2090 int numsaved = 0;
2091 boolean_t gzfreed = FALSE;
2092
2093 assert(zone != ZONE_NULL);
2094
2095 /*
2096 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
2097 */
2098
2099 if (DO_LOGGING(zone))
2100 numsaved = OSBacktrace(&zbt[0], MAX_ZTRACE_DEPTH);
2101
2102 #if MACH_ASSERT
2103 /* Basic sanity checks */
2104 if (zone == ZONE_NULL || elem == (vm_offset_t)0)
2105 panic("zfree: NULL");
2106 /* zone_gc assumes zones are never freed */
2107 if (zone == zone_zone)
2108 panic("zfree: freeing to zone_zone breaks zone_gc!");
2109 #endif
2110
2111 #if CONFIG_GZALLOC
2112 gzfreed = gzalloc_free(zone, addr);
2113 #endif
2114
2115 TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);
2116
2117 if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign &&
2118 !from_zone_map(elem, zone->elem_size))) {
2119 #if MACH_ASSERT
2120 panic("zfree: non-allocated memory in collectable zone!");
2121 #endif
2122 zone_last_bogus_zone = zone;
2123 zone_last_bogus_elem = elem;
2124 return;
2125 }
2126
2127 lock_zone(zone);
2128
2129 /*
2130 * See if we're doing logging on this zone. There are two styles of logging used depending on
2131 * whether we're trying to catch a leak or corruption. See comments above in zalloc for details.
2132 */
2133
2134 if (DO_LOGGING(zone)) {
2135 int i;
2136
2137 if (corruption_debug_flag) {
2138
2139 /*
2140 * We're logging to catch a corruption. Add a record of this zfree operation
2141 * to log.
2142 */
2143
2144 if (zrecords[zcurrent].z_element == NULL)
2145 zrecorded++;
2146
2147 zrecords[zcurrent].z_element = (void *)addr;
2148 zrecords[zcurrent].z_time = ztime++;
2149 zrecords[zcurrent].z_opcode = ZOP_FREE;
2150
2151 for (i = 0; i < numsaved; i++)
2152 zrecords[zcurrent].z_pc[i] = zbt[i];
2153
2154 for (; i < MAX_ZTRACE_DEPTH; i++)
2155 zrecords[zcurrent].z_pc[i] = 0;
2156
2157 zcurrent++;
2158
2159 if (zcurrent >= log_records)
2160 zcurrent = 0;
2161
2162 } else {
2163
2164 /*
2165 * We're logging to catch a leak. Remove any record we might have for this
2166 * element since it's being freed. Note that we may not find it if the buffer
2167 * overflowed and that's OK. Since the log is of a limited size, old records
2168 * get overwritten if there are more zallocs than zfrees.
2169 */
2170
2171 for (i = 0; i < log_records; i++) {
2172 if (zrecords[i].z_element == addr) {
2173 zrecords[i].z_element = NULL;
2174 zcurrent = i;
2175 zrecorded--;
2176 break;
2177 }
2178 }
2179 }
2180 }
2181
2182
2183 #if ZONE_DEBUG
2184 if (!gzfreed && zone_debug_enabled(zone)) {
2185 queue_t tmp_elem;
2186
2187 elem -= ZONE_DEBUG_OFFSET;
2188 if (zone_check) {
2189 /* check the zone's consistency */
2190
2191 for (tmp_elem = queue_first(&zone->active_zones);
2192 !queue_end(tmp_elem, &zone->active_zones);
2193 tmp_elem = queue_next(tmp_elem))
2194 if (elem == (vm_offset_t)tmp_elem)
2195 break;
2196 if (elem != (vm_offset_t)tmp_elem)
2197 panic("zfree()ing element from wrong zone");
2198 }
2199 remqueue((queue_t) elem);
2200 }
2201 #endif /* ZONE_DEBUG */
2202 if (zone_check) {
2203 vm_offset_t this;
2204
2205 /* check the zone's consistency */
2206
2207 for (this = zone->free_elements;
2208 this != 0;
2209 this = * (vm_offset_t *) this)
2210 if (!pmap_kernel_va(this) || this == elem)
2211 panic("zfree");
2212 }
2213
2214 if (__probable(!gzfreed))
2215 free_to_zone(zone, (void *) elem);
2216
2217 #if MACH_ASSERT
2218 if (zone->count < 0)
2219 panic("zfree: count < 0!");
2220 #endif
2221
2222
2223 #if CONFIG_ZLEAKS
2224 zone->num_frees++;
2225
2226 /*
2227 * Zone leak detection: un-track the allocation
2228 */
2229 if (zone->zleak_on) {
2230 zleak_free(elem, zone->elem_size);
2231 }
2232 #endif /* CONFIG_ZLEAKS */
2233
2234 /*
2235 * If this zone's elements span one or more pages and memory is low,
2236 * request that zone garbage collection be run the next
2237 * time the pageout thread runs.
2238 */
2239 if (zone->elem_size >= PAGE_SIZE &&
2240 vm_pool_low()){
2241 zone_gc_forced = TRUE;
2242 }
2243 unlock_zone(zone);
2244
2245 {
2246 thread_t thr = current_thread();
2247 task_t task;
2248 zinfo_usage_t zinfo;
2249 vm_size_t sz = zone->elem_size;
2250
2251 if (zone->caller_acct)
2252 ledger_debit(thr->t_ledger, task_ledgers.tkm_private, sz);
2253 else
2254 ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, sz);
2255
2256 if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
2257 OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].free);
2258 }
2259 }
2260
2261
2262 /* Change a zone's flags.
2263 * This routine must be called immediately after zinit.
2264 */
2265 void
2266 zone_change(
2267 zone_t zone,
2268 unsigned int item,
2269 boolean_t value)
2270 {
2271 assert( zone != ZONE_NULL );
2272 assert( value == TRUE || value == FALSE );
2273
2274 switch(item){
2275 case Z_NOENCRYPT:
2276 zone->noencrypt = value;
2277 break;
2278 case Z_EXHAUST:
2279 zone->exhaustible = value;
2280 break;
2281 case Z_COLLECT:
2282 zone->collectable = value;
2283 break;
2284 case Z_EXPAND:
2285 zone->expandable = value;
2286 break;
2287 case Z_FOREIGN:
2288 zone->allows_foreign = value;
2289 break;
2290 case Z_CALLERACCT:
2291 zone->caller_acct = value;
2292 break;
2293 case Z_NOCALLOUT:
2294 zone->no_callout = value;
2295 break;
2296 case Z_GZALLOC_EXEMPT:
2297 zone->gzalloc_exempt = value;
2298 #if CONFIG_GZALLOC
2299 gzalloc_reconfigure(zone);
2300 #endif
2301 break;
2302 case Z_ALIGNMENT_REQUIRED:
2303 zone->alignment_required = value;
2304 #if ZONE_DEBUG
2305 zone_debug_disable(zone);
2306 #endif
2307 #if CONFIG_GZALLOC
2308 gzalloc_reconfigure(zone);
2309 #endif
2310 break;
2311 default:
2312 panic("Zone_change: Wrong Item Type!");
2313 /* break; */
2314 }
2315 }
2316
2317 /*
2318 * Return the expected number of free elements in the zone.
2319 * This calculation will be incorrect if items are zfree'd that
2320 * were never zalloc'd/zget'd. The correct way to stuff memory
2321 * into a zone is by zcram.
2322 */
2323
2324 integer_t
2325 zone_free_count(zone_t zone)
2326 {
2327 integer_t free_count;
2328
2329 lock_zone(zone);
2330 free_count = (integer_t)(zone->cur_size/zone->elem_size - zone->count);
2331 unlock_zone(zone);
2332
2333 assert(free_count >= 0);
2334
2335 return(free_count);
2336 }
2337
2338 /*
2339 * Zone garbage collection subroutines
2340 */
2341
2342 boolean_t
2343 zone_page_collectable(
2344 vm_offset_t addr,
2345 vm_size_t size)
2346 {
2347 struct zone_page_table_entry *zp;
2348 zone_page_index_t i, j;
2349
2350 #if ZONE_ALIAS_ADDR
2351 addr = zone_virtual_addr(addr);
2352 #endif
2353 #if MACH_ASSERT
2354 if (!from_zone_map(addr, size))
2355 panic("zone_page_collectable");
2356 #endif
2357
2358 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
2359 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
2360
2361 for (; i <= j; i++) {
2362 zp = zone_page_table_lookup(i);
2363 if (zp->collect_count == zp->alloc_count)
2364 return (TRUE);
2365 }
2366
2367 return (FALSE);
2368 }
2369
2370 void
2371 zone_page_keep(
2372 vm_offset_t addr,
2373 vm_size_t size)
2374 {
2375 struct zone_page_table_entry *zp;
2376 zone_page_index_t i, j;
2377
2378 #if ZONE_ALIAS_ADDR
2379 addr = zone_virtual_addr(addr);
2380 #endif
2381 #if MACH_ASSERT
2382 if (!from_zone_map(addr, size))
2383 panic("zone_page_keep");
2384 #endif
2385
2386 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
2387 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
2388
2389 for (; i <= j; i++) {
2390 zp = zone_page_table_lookup(i);
2391 zp->collect_count = 0;
2392 }
2393 }
2394
2395 void
2396 zone_page_collect(
2397 vm_offset_t addr,
2398 vm_size_t size)
2399 {
2400 struct zone_page_table_entry *zp;
2401 zone_page_index_t i, j;
2402
2403 #if ZONE_ALIAS_ADDR
2404 addr = zone_virtual_addr(addr);
2405 #endif
2406 #if MACH_ASSERT
2407 if (!from_zone_map(addr, size))
2408 panic("zone_page_collect");
2409 #endif
2410
2411 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
2412 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
2413
2414 for (; i <= j; i++) {
2415 zp = zone_page_table_lookup(i);
2416 ++zp->collect_count;
2417 }
2418 }
2419
2420 void
2421 zone_page_init(
2422 vm_offset_t addr,
2423 vm_size_t size)
2424 {
2425 struct zone_page_table_entry *zp;
2426 zone_page_index_t i, j;
2427
2428 #if ZONE_ALIAS_ADDR
2429 addr = zone_virtual_addr(addr);
2430 #endif
2431 #if MACH_ASSERT
2432 if (!from_zone_map(addr, size))
2433 panic("zone_page_init");
2434 #endif
2435
2436 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
2437 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
2438
2439 for (; i <= j; i++) {
2440 /* make sure entry exists before marking unused */
2441 zone_page_table_expand(i);
2442
2443 zp = zone_page_table_lookup(i);
2444 assert(zp);
2445 zp->alloc_count = ZONE_PAGE_UNUSED;
2446 zp->collect_count = 0;
2447 }
2448 }
2449
2450 void
2451 zone_page_alloc(
2452 vm_offset_t addr,
2453 vm_size_t size)
2454 {
2455 struct zone_page_table_entry *zp;
2456 zone_page_index_t i, j;
2457
2458 #if ZONE_ALIAS_ADDR
2459 addr = zone_virtual_addr(addr);
2460 #endif
2461 #if MACH_ASSERT
2462 if (!from_zone_map(addr, size))
2463 panic("zone_page_alloc");
2464 #endif
2465
2466 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
2467 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
2468
2469 for (; i <= j; i++) {
2470 zp = zone_page_table_lookup(i);
2471 assert(zp);
2472
2473 /*
2474 * Set alloc_count to ZONE_PAGE_USED if
2475 * it was previously set to ZONE_PAGE_UNUSED.
2476 */
2477 if (zp->alloc_count == ZONE_PAGE_UNUSED)
2478 zp->alloc_count = ZONE_PAGE_USED;
2479
2480 ++zp->alloc_count;
2481 }
2482 }
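/*
 * Accounting example (illustrative; ZONE_PAGE_USED/UNUSED are defined
 * earlier in this file): after zcram() carves a 4 KB page into eight
 * 512-byte elements it has called zone_page_alloc() once per element,
 * so that page's alloc_count is 8.  During zone_gc(), zone_page_collect()
 * raises collect_count once for each of those elements found on the free
 * list; the page becomes reclaimable when collect_count catches up with
 * alloc_count (see zone_page_collectable() above), and zone_page_keep()
 * zeroes collect_count on pages that must stay.
 */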
2483
2484 void
2485 zone_page_free_element(
2486 zone_page_index_t *free_page_head,
2487 zone_page_index_t *free_page_tail,
2488 vm_offset_t addr,
2489 vm_size_t size)
2490 {
2491 struct zone_page_table_entry *zp;
2492 zone_page_index_t i, j;
2493
2494 #if ZONE_ALIAS_ADDR
2495 addr = zone_virtual_addr(addr);
2496 #endif
2497 #if MACH_ASSERT
2498 if (!from_zone_map(addr, size))
2499 panic("zone_page_free_element");
2500 #endif
2501
2502 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
2503 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
2504
2505 for (; i <= j; i++) {
2506 zp = zone_page_table_lookup(i);
2507
2508 if (zp->collect_count > 0)
2509 --zp->collect_count;
2510 if (--zp->alloc_count == 0) {
2511 vm_address_t free_page_address;
2512 vm_address_t prev_free_page_address;
2513
2514 zp->alloc_count = ZONE_PAGE_UNUSED;
2515 zp->collect_count = 0;
2516
2517
2518 /*
2519 * This element was the last one on this page, re-use the page's
2520 * storage for a page freelist
2521 */
2522 free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)i);
2523 *(zone_page_index_t *)free_page_address = ZONE_PAGE_INDEX_INVALID;
2524
2525 if (*free_page_head == ZONE_PAGE_INDEX_INVALID) {
2526 *free_page_head = i;
2527 *free_page_tail = i;
2528 } else {
2529 prev_free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)(*free_page_tail));
2530 *(zone_page_index_t *)prev_free_page_address = i;
2531 *free_page_tail = i;
2532 }
2533 }
2534 }
2535 }
2536
2537
2538 /* This is used for walking through a zone's free element list.
2539 */
2540 struct zone_free_element {
2541 struct zone_free_element * next;
2542 };
2543
2544 /*
2545 * Add a linked list of pages starting at base back into the zone
2546 * free list. Tail points to the last element on the list.
2547 */
2548 #define ADD_LIST_TO_ZONE(zone, base, tail) \
2549 MACRO_BEGIN \
2550 (tail)->next = (void *)((zone)->free_elements); \
2551 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) { \
2552 ((vm_offset_t *)(tail))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
2553 (zone)->free_elements; \
2554 } \
2555 (zone)->free_elements = (unsigned long)(base); \
2556 MACRO_END
2557
2558 /*
2559 * Add an element to the chain pointed to by prev.
2560 */
2561 #define ADD_ELEMENT(zone, prev, elem) \
2562 MACRO_BEGIN \
2563 (prev)->next = (elem); \
2564 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) { \
2565 ((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
2566 (vm_offset_t)(elem); \
2567 } \
2568 MACRO_END
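/*
 * Layout sketch of an element parked on a zone free list (conceptual,
 * not a declaration used by this code): the first word is the free-list
 * link written through struct zone_free_element, and, when elem_size is
 * at least two pointers plus a uint32_t, the macros above mirror that
 * link into the last pointer-sized word of the element:
 *
 *	offset 0                          next free element
 *	...                               stale element contents
 *	offset elem_size - sizeof(void *) copy of the same link
 *
 * Keeping the two copies in step gives the element alloc/free paths a
 * redundant word they can cross-check against writes to freed memory.
 */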
2569
2570 struct {
2571 uint32_t pgs_freed;
2572
2573 uint32_t elems_collected,
2574 elems_freed,
2575 elems_kept;
2576 } zgc_stats;
2577
2578 /* Zone garbage collection
2579 *
2580 * zone_gc will walk through all the free elements in all the
2581 * zones that are marked collectable looking for reclaimable
2582 * pages. zone_gc is called by consider_zone_gc when the system
2583 * begins to run out of memory.
2584 */
2585 void
2586 zone_gc(boolean_t all_zones)
2587 {
2588 unsigned int max_zones;
2589 zone_t z;
2590 unsigned int i;
2591 zone_page_index_t zone_free_page_head;
2592 zone_page_index_t zone_free_page_tail;
2593 thread_t mythread = current_thread();
2594
2595 lck_mtx_lock(&zone_gc_lock);
2596
2597 simple_lock(&all_zones_lock);
2598 max_zones = num_zones;
2599 z = first_zone;
2600 simple_unlock(&all_zones_lock);
2601
2602
2603 /*
2604 * it's ok to allow eager kernel preemption while
2605 * holding a zone lock since it's taken
2606 * as a spin lock (which prevents preemption)
2607 */
2608 thread_set_eager_preempt(mythread);
2609
2610 #if MACH_ASSERT
2611 for (i = 0; i < zone_pages; i++) {
2612 struct zone_page_table_entry *zp;
2613
2614 zp = zone_page_table_lookup(i);
2615 assert(!zp || (zp->collect_count == 0));
2616 }
2617 #endif /* MACH_ASSERT */
2618
2619 for (i = 0; i < max_zones; i++, z = z->next_zone) {
2620 unsigned int n, m;
2621 vm_size_t elt_size, size_freed;
2622 struct zone_free_element *elt, *base_elt, *base_prev, *prev, *scan, *keep, *tail;
2623 int kmem_frees = 0;
2624
2625 assert(z != ZONE_NULL);
2626
2627 if (!z->collectable)
2628 continue;
2629
2630 if (all_zones == FALSE && z->elem_size < PAGE_SIZE)
2631 continue;
2632
2633 lock_zone(z);
2634
2635 elt_size = z->elem_size;
2636
2637 /*
2638 * Do a quick feasibility check before we scan the zone:
2639 * skip unless there is likelihood of getting pages back
2640 * (i.e. we need a whole allocation block's worth of free
2641 * elements before we can garbage collect) and
2642 * the zone has more than 10 percent of its elements free
2643 * or the element size is a multiple of the PAGE_SIZE
2644 */
2645 if ((elt_size & PAGE_MASK) &&
2646 (((z->cur_size - z->count * elt_size) <= (2 * z->alloc_size)) ||
2647 ((z->cur_size - z->count * elt_size) <= (z->cur_size / 10)))) {
2648 unlock_zone(z);
2649 continue;
2650 }
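/*
 * Example of the feasibility check above (illustrative numbers, 4 KB
 * PAGE_SIZE): a zone of 256-byte elements with alloc_size == PAGE_SIZE,
 * cur_size == 1 MB and only 100 KB of free elements is skipped (under
 * 10 percent free); the same zone with 300 KB free is scanned.  Zones
 * whose element size is a multiple of PAGE_SIZE are always scanned,
 * since each free element is at least a page of potentially reclaimable
 * memory.
 */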
2651
2652 z->doing_gc = TRUE;
2653
2654 /*
2655 * Snatch all of the free elements away from the zone.
2656 */
2657
2658 scan = (void *)z->free_elements;
2659 z->free_elements = 0;
2660
2661 unlock_zone(z);
2662
2663 /*
2664 * Pass 1:
2665 *
2666 * Determine which elements we can attempt to collect
2667 * and count them up in the page table. Foreign elements
2668 * are returned to the zone.
2669 */
2670
2671 prev = (void *)&scan;
2672 elt = scan;
2673 n = 0; tail = keep = NULL;
2674
2675 zone_free_page_head = ZONE_PAGE_INDEX_INVALID;
2676 zone_free_page_tail = ZONE_PAGE_INDEX_INVALID;
2677
2678
2679 while (elt != NULL) {
2680 if (from_zone_map(elt, elt_size)) {
2681 zone_page_collect((vm_offset_t)elt, elt_size);
2682
2683 prev = elt;
2684 elt = elt->next;
2685
2686 ++zgc_stats.elems_collected;
2687 }
2688 else {
2689 if (keep == NULL)
2690 keep = tail = elt;
2691 else {
2692 ADD_ELEMENT(z, tail, elt);
2693 tail = elt;
2694 }
2695
2696 ADD_ELEMENT(z, prev, elt->next);
2697 elt = elt->next;
2698 ADD_ELEMENT(z, tail, NULL);
2699 }
2700
2701 /*
2702 * Dribble back the elements we are keeping.
2703 */
2704
2705 if (++n >= 50) {
2706 if (z->waiting == TRUE) {
2707 /* z->waiting checked without lock held, rechecked below after locking */
2708 lock_zone(z);
2709
2710 if (keep != NULL) {
2711 ADD_LIST_TO_ZONE(z, keep, tail);
2712 tail = keep = NULL;
2713 } else {
2714 m = 0;
2715 base_elt = elt;
2716 base_prev = prev;
2717 while ((elt != NULL) && (++m < 50)) {
2718 prev = elt;
2719 elt = elt->next;
2720 }
2721 if (m != 0) {
2722 ADD_LIST_TO_ZONE(z, base_elt, prev);
2723 ADD_ELEMENT(z, base_prev, elt);
2724 prev = base_prev;
2725 }
2726 }
2727
2728 if (z->waiting) {
2729 z->waiting = FALSE;
2730 zone_wakeup(z);
2731 }
2732
2733 unlock_zone(z);
2734 }
2735 n = 0;
2736 }
2737 }
2738
2739 /*
2740 * Return any remaining elements.
2741 */
2742
2743 if (keep != NULL) {
2744 lock_zone(z);
2745
2746 ADD_LIST_TO_ZONE(z, keep, tail);
2747
2748 if (z->waiting) {
2749 z->waiting = FALSE;
2750 zone_wakeup(z);
2751 }
2752
2753 unlock_zone(z);
2754 }
2755
2756 /*
2757 * Pass 2:
2758 *
2759 * Determine which pages we can reclaim and
2760 * free those elements.
2761 */
2762
2763 size_freed = 0;
2764 elt = scan;
2765 n = 0; tail = keep = NULL;
2766
2767 while (elt != NULL) {
2768 if (zone_page_collectable((vm_offset_t)elt, elt_size)) {
2769 struct zone_free_element *next_elt = elt->next;
2770
2771 size_freed += elt_size;
2772
2773 /*
2774 * If this is the last allocation on the page(s),
2775 * we may use their storage to maintain the linked
2776 * list of free-able pages. So store elt->next because
2777 * "elt" may be scribbled over.
2778 */
2779 zone_page_free_element(&zone_free_page_head, &zone_free_page_tail, (vm_offset_t)elt, elt_size);
2780
2781 elt = next_elt;
2782
2783 ++zgc_stats.elems_freed;
2784 }
2785 else {
2786 zone_page_keep((vm_offset_t)elt, elt_size);
2787
2788 if (keep == NULL)
2789 keep = tail = elt;
2790 else {
2791 ADD_ELEMENT(z, tail, elt);
2792 tail = elt;
2793 }
2794
2795 elt = elt->next;
2796 ADD_ELEMENT(z, tail, NULL);
2797
2798 ++zgc_stats.elems_kept;
2799 }
2800
2801 /*
2802 * Dribble back the elements we are keeping,
2803 * and update the zone size info.
2804 */
2805
2806 if (++n >= 50) {
2807 lock_zone(z);
2808
2809 z->cur_size -= size_freed;
2810 size_freed = 0;
2811
2812 if (keep != NULL) {
2813 ADD_LIST_TO_ZONE(z, keep, tail);
2814 }
2815
2816 if (z->waiting) {
2817 z->waiting = FALSE;
2818 zone_wakeup(z);
2819 }
2820
2821 unlock_zone(z);
2822
2823 n = 0; tail = keep = NULL;
2824 }
2825 }
2826
2827 /*
2828 * Return any remaining elements, and update
2829 * the zone size info.
2830 */
2831
2832 lock_zone(z);
2833
2834 if (size_freed > 0 || keep != NULL) {
2835
2836 z->cur_size -= size_freed;
2837
2838 if (keep != NULL) {
2839 ADD_LIST_TO_ZONE(z, keep, tail);
2840 }
2841
2842 }
2843
2844 z->doing_gc = FALSE;
2845 if (z->waiting) {
2846 z->waiting = FALSE;
2847 zone_wakeup(z);
2848 }
2849 unlock_zone(z);
2850
2851
2852 if (zone_free_page_head == ZONE_PAGE_INDEX_INVALID)
2853 continue;
2854
2855 /*
2856 * we don't want to allow eager kernel preemption while holding the
2857 * various locks taken in the kmem_free path of execution
2858 */
2859 thread_clear_eager_preempt(mythread);
2860
2861 /*
2862 * Reclaim the pages we are freeing.
2863 */
2864 while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
2865 zone_page_index_t zind = zone_free_page_head;
2866 vm_address_t free_page_address;
2867 int page_count;
2868
2869 /*
2870 * Use the first word of the page about to be freed to find the next free page
2871 */
2872 free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)zind);
2873 zone_free_page_head = *(zone_page_index_t *)free_page_address;
2874
2875 page_count = 1;
2876
2877 while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
2878 zone_page_index_t next_zind = zone_free_page_head;
2879 vm_address_t next_free_page_address;
2880
2881 next_free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)next_zind);
2882
2883 if (next_free_page_address == (free_page_address - PAGE_SIZE)) {
2884 free_page_address = next_free_page_address;
2885 } else if (next_free_page_address != (free_page_address + (PAGE_SIZE * page_count)))
2886 break;
2887
2888 zone_free_page_head = *(zone_page_index_t *)next_free_page_address;
2889 page_count++;
2890 }
2891 kmem_free(zone_map, free_page_address, page_count * PAGE_SIZE);
2892
2893 zgc_stats.pgs_freed += page_count;
2894
2895 if (++kmem_frees == 32) {
2896 thread_yield_internal(1);
2897 kmem_frees = 0;
2898 }
2899 }
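/*
 * Worked example of the coalescing above (illustrative): if the page
 * freelist built in pass 2 links pages 10 -> 9 -> 8 -> 3, the inner loop
 * absorbs 9 and 8 (each one page below the previous address), leaving
 * free_page_address at page 8 with page_count == 3, so one kmem_free()
 * returns the whole 3-page run; page 3 then starts the next run.
 * Ascending runs (e.g. 4 -> 5 -> 6) coalesce via the second test in the
 * loop.
 */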
2900 thread_set_eager_preempt(mythread);
2901 }
2902 thread_clear_eager_preempt(mythread);
2903
2904 lck_mtx_unlock(&zone_gc_lock);
2905
2906 }
2907
2908 extern vm_offset_t kmapoff_kaddr;
2909 extern unsigned int kmapoff_pgcnt;
2910
2911 /*
2912 * consider_zone_gc:
2913 *
2914 * Called by the pageout daemon when the system needs more free pages.
2915 */
2916
2917 void
2918 consider_zone_gc(boolean_t force)
2919 {
2920 boolean_t all_zones = FALSE;
2921
2922 if (kmapoff_kaddr != 0) {
2923 /*
2924 * One-time reclaim of kernel_map resources we allocated in
2925 * early boot.
2926 */
2927 (void) vm_deallocate(kernel_map,
2928 kmapoff_kaddr, kmapoff_pgcnt * PAGE_SIZE_64);
2929 kmapoff_kaddr = 0;
2930 }
2931
2932 if (zone_gc_allowed &&
2933 (zone_gc_allowed_by_time_throttle ||
2934 zone_gc_forced ||
2935 force)) {
2936 if (zone_gc_allowed_by_time_throttle == TRUE) {
2937 zone_gc_allowed_by_time_throttle = FALSE;
2938 all_zones = TRUE;
2939 }
2940 zone_gc_forced = FALSE;
2941
2942 zone_gc(all_zones);
2943 }
2944 }
2945
2946 /*
2947 * By default, don't attempt zone GC more frequently
2948 * than once per minute.
2949 */
2950 void
2951 compute_zone_gc_throttle(void *arg __unused)
2952 {
2953 zone_gc_allowed_by_time_throttle = TRUE;
2954 }
2955
2956
2957 #if CONFIG_TASK_ZONE_INFO
2958
2959 kern_return_t
2960 task_zone_info(
2961 task_t task,
2962 mach_zone_name_array_t *namesp,
2963 mach_msg_type_number_t *namesCntp,
2964 task_zone_info_array_t *infop,
2965 mach_msg_type_number_t *infoCntp)
2966 {
2967 mach_zone_name_t *names;
2968 vm_offset_t names_addr;
2969 vm_size_t names_size;
2970 task_zone_info_t *info;
2971 vm_offset_t info_addr;
2972 vm_size_t info_size;
2973 unsigned int max_zones, i;
2974 zone_t z;
2975 mach_zone_name_t *zn;
2976 task_zone_info_t *zi;
2977 kern_return_t kr;
2978
2979 vm_size_t used;
2980 vm_map_copy_t copy;
2981
2982
2983 if (task == TASK_NULL)
2984 return KERN_INVALID_TASK;
2985
2986 /*
2987 * We assume that zones aren't freed once allocated.
2988 * We won't pick up any zones that are allocated later.
2989 */
2990
2991 simple_lock(&all_zones_lock);
2992 max_zones = (unsigned int)(num_zones + num_fake_zones);
2993 z = first_zone;
2994 simple_unlock(&all_zones_lock);
2995
2996 names_size = round_page(max_zones * sizeof *names);
2997 kr = kmem_alloc_pageable(ipc_kernel_map,
2998 &names_addr, names_size);
2999 if (kr != KERN_SUCCESS)
3000 return kr;
3001 names = (mach_zone_name_t *) names_addr;
3002
3003 info_size = round_page(max_zones * sizeof *info);
3004 kr = kmem_alloc_pageable(ipc_kernel_map,
3005 &info_addr, info_size);
3006 if (kr != KERN_SUCCESS) {
3007 kmem_free(ipc_kernel_map,
3008 names_addr, names_size);
3009 return kr;
3010 }
3011
3012 info = (task_zone_info_t *) info_addr;
3013
3014 zn = &names[0];
3015 zi = &info[0];
3016
3017 for (i = 0; i < max_zones - num_fake_zones; i++) {
3018 struct zone zcopy;
3019
3020 assert(z != ZONE_NULL);
3021
3022 lock_zone(z);
3023 zcopy = *z;
3024 unlock_zone(z);
3025
3026 simple_lock(&all_zones_lock);
3027 z = z->next_zone;
3028 simple_unlock(&all_zones_lock);
3029
3030 /* assuming here the name data is static */
3031 (void) strncpy(zn->mzn_name, zcopy.zone_name,
3032 sizeof zn->mzn_name);
3033 zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
3034
3035 zi->tzi_count = (uint64_t)zcopy.count;
3036 zi->tzi_cur_size = (uint64_t)zcopy.cur_size;
3037 zi->tzi_max_size = (uint64_t)zcopy.max_size;
3038 zi->tzi_elem_size = (uint64_t)zcopy.elem_size;
3039 zi->tzi_alloc_size = (uint64_t)zcopy.alloc_size;
3040 zi->tzi_sum_size = zcopy.sum_count * zcopy.elem_size;
3041 zi->tzi_exhaustible = (uint64_t)zcopy.exhaustible;
3042 zi->tzi_collectable = (uint64_t)zcopy.collectable;
3043 zi->tzi_caller_acct = (uint64_t)zcopy.caller_acct;
3044 if (task->tkm_zinfo != NULL) {
3045 zi->tzi_task_alloc = task->tkm_zinfo[zcopy.index].alloc;
3046 zi->tzi_task_free = task->tkm_zinfo[zcopy.index].free;
3047 } else {
3048 zi->tzi_task_alloc = 0;
3049 zi->tzi_task_free = 0;
3050 }
3051 zn++;
3052 zi++;
3053 }
3054
3055 /*
3056 * loop through the fake zones and fill them using the specialized
3057 * functions
3058 */
3059 for (i = 0; i < num_fake_zones; i++) {
3060 int count, collectable, exhaustible, caller_acct, index;
3061 vm_size_t cur_size, max_size, elem_size, alloc_size;
3062 uint64_t sum_size;
3063
3064 strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
3065 zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
3066 fake_zones[i].query(&count, &cur_size,
3067 &max_size, &elem_size,
3068 &alloc_size, &sum_size,
3069 &collectable, &exhaustible, &caller_acct);
3070 zi->tzi_count = (uint64_t)count;
3071 zi->tzi_cur_size = (uint64_t)cur_size;
3072 zi->tzi_max_size = (uint64_t)max_size;
3073 zi->tzi_elem_size = (uint64_t)elem_size;
3074 zi->tzi_alloc_size = (uint64_t)alloc_size;
3075 zi->tzi_sum_size = sum_size;
3076 zi->tzi_collectable = (uint64_t)collectable;
3077 zi->tzi_exhaustible = (uint64_t)exhaustible;
3078 zi->tzi_caller_acct = (uint64_t)caller_acct;
3079 if (task->tkm_zinfo != NULL) {
3080 index = ZINFO_SLOTS - num_fake_zones + i;
3081 zi->tzi_task_alloc = task->tkm_zinfo[index].alloc;
3082 zi->tzi_task_free = task->tkm_zinfo[index].free;
3083 } else {
3084 zi->tzi_task_alloc = 0;
3085 zi->tzi_task_free = 0;
3086 }
3087 zn++;
3088 zi++;
3089 }
3090
3091 used = max_zones * sizeof *names;
3092 if (used != names_size)
3093 bzero((char *) (names_addr + used), names_size - used);
3094
3095 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
3096 (vm_map_size_t)names_size, TRUE, &copy);
3097 assert(kr == KERN_SUCCESS);
3098
3099 *namesp = (mach_zone_name_t *) copy;
3100 *namesCntp = max_zones;
3101
3102 used = max_zones * sizeof *info;
3103
3104 if (used != info_size)
3105 bzero((char *) (info_addr + used), info_size - used);
3106
3107 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
3108 (vm_map_size_t)info_size, TRUE, &copy);
3109 assert(kr == KERN_SUCCESS);
3110
3111 *infop = (task_zone_info_t *) copy;
3112 *infoCntp = max_zones;
3113
3114 return KERN_SUCCESS;
3115 }
3116
3117 #else /* CONFIG_TASK_ZONE_INFO */
3118
3119 kern_return_t
3120 task_zone_info(
3121 __unused task_t task,
3122 __unused mach_zone_name_array_t *namesp,
3123 __unused mach_msg_type_number_t *namesCntp,
3124 __unused task_zone_info_array_t *infop,
3125 __unused mach_msg_type_number_t *infoCntp)
3126 {
3127 return KERN_FAILURE;
3128 }
3129
3130 #endif /* CONFIG_TASK_ZONE_INFO */
3131
3132 kern_return_t
3133 mach_zone_info(
3134 host_priv_t host,
3135 mach_zone_name_array_t *namesp,
3136 mach_msg_type_number_t *namesCntp,
3137 mach_zone_info_array_t *infop,
3138 mach_msg_type_number_t *infoCntp)
3139 {
3140 mach_zone_name_t *names;
3141 vm_offset_t names_addr;
3142 vm_size_t names_size;
3143 mach_zone_info_t *info;
3144 vm_offset_t info_addr;
3145 vm_size_t info_size;
3146 unsigned int max_zones, i;
3147 zone_t z;
3148 mach_zone_name_t *zn;
3149 mach_zone_info_t *zi;
3150 kern_return_t kr;
3151
3152 vm_size_t used;
3153 vm_map_copy_t copy;
3154
3155
3156 if (host == HOST_NULL)
3157 return KERN_INVALID_HOST;
3158 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
3159 if (!PE_i_can_has_debugger(NULL))
3160 return KERN_INVALID_HOST;
3161 #endif
3162
3163 /*
3164 * We assume that zones aren't freed once allocated.
3165 * We won't pick up any zones that are allocated later.
3166 */
3167
3168 simple_lock(&all_zones_lock);
3169 max_zones = (unsigned int)(num_zones + num_fake_zones);
3170 z = first_zone;
3171 simple_unlock(&all_zones_lock);
3172
3173 names_size = round_page(max_zones * sizeof *names);
3174 kr = kmem_alloc_pageable(ipc_kernel_map,
3175 &names_addr, names_size);
3176 if (kr != KERN_SUCCESS)
3177 return kr;
3178 names = (mach_zone_name_t *) names_addr;
3179
3180 info_size = round_page(max_zones * sizeof *info);
3181 kr = kmem_alloc_pageable(ipc_kernel_map,
3182 &info_addr, info_size);
3183 if (kr != KERN_SUCCESS) {
3184 kmem_free(ipc_kernel_map,
3185 names_addr, names_size);
3186 return kr;
3187 }
3188
3189 info = (mach_zone_info_t *) info_addr;
3190
3191 zn = &names[0];
3192 zi = &info[0];
3193
3194 for (i = 0; i < max_zones - num_fake_zones; i++) {
3195 struct zone zcopy;
3196
3197 assert(z != ZONE_NULL);
3198
3199 lock_zone(z);
3200 zcopy = *z;
3201 unlock_zone(z);
3202
3203 simple_lock(&all_zones_lock);
3204 z = z->next_zone;
3205 simple_unlock(&all_zones_lock);
3206
3207 /* assuming here the name data is static */
3208 (void) strncpy(zn->mzn_name, zcopy.zone_name,
3209 sizeof zn->mzn_name);
3210 zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
3211
3212 zi->mzi_count = (uint64_t)zcopy.count;
3213 zi->mzi_cur_size = (uint64_t)zcopy.cur_size;
3214 zi->mzi_max_size = (uint64_t)zcopy.max_size;
3215 zi->mzi_elem_size = (uint64_t)zcopy.elem_size;
3216 zi->mzi_alloc_size = (uint64_t)zcopy.alloc_size;
3217 zi->mzi_sum_size = zcopy.sum_count * zcopy.elem_size;
3218 zi->mzi_exhaustible = (uint64_t)zcopy.exhaustible;
3219 zi->mzi_collectable = (uint64_t)zcopy.collectable;
3220 zn++;
3221 zi++;
3222 }
3223
3224 /*
3225 * loop through the fake zones and fill them using the specialized
3226 * functions
3227 */
3228 for (i = 0; i < num_fake_zones; i++) {
3229 int count, collectable, exhaustible, caller_acct;
3230 vm_size_t cur_size, max_size, elem_size, alloc_size;
3231 uint64_t sum_size;
3232
3233 strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
3234 zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
3235 fake_zones[i].query(&count, &cur_size,
3236 &max_size, &elem_size,
3237 &alloc_size, &sum_size,
3238 &collectable, &exhaustible, &caller_acct);
3239 zi->mzi_count = (uint64_t)count;
3240 zi->mzi_cur_size = (uint64_t)cur_size;
3241 zi->mzi_max_size = (uint64_t)max_size;
3242 zi->mzi_elem_size = (uint64_t)elem_size;
3243 zi->mzi_alloc_size = (uint64_t)alloc_size;
3244 zi->mzi_sum_size = sum_size;
3245 zi->mzi_collectable = (uint64_t)collectable;
3246 zi->mzi_exhaustible = (uint64_t)exhaustible;
3247
3248 zn++;
3249 zi++;
3250 }
3251
3252 used = max_zones * sizeof *names;
3253 if (used != names_size)
3254 bzero((char *) (names_addr + used), names_size - used);
3255
3256 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
3257 (vm_map_size_t)names_size, TRUE, &copy);
3258 assert(kr == KERN_SUCCESS);
3259
3260 *namesp = (mach_zone_name_t *) copy;
3261 *namesCntp = max_zones;
3262
3263 used = max_zones * sizeof *info;
3264
3265 if (used != info_size)
3266 bzero((char *) (info_addr + used), info_size - used);
3267
3268 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
3269 (vm_map_size_t)info_size, TRUE, &copy);
3270 assert(kr == KERN_SUCCESS);
3271
3272 *infop = (mach_zone_info_t *) copy;
3273 *infoCntp = max_zones;
3274
3275 return KERN_SUCCESS;
3276 }
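/*
 * Note on the return convention shared by task_zone_info(),
 * mach_zone_info() and host_zone_info(): the arrays are built in pageable
 * ipc_kernel_map memory, wrapped with vm_map_copyin(), and returned as
 * out-of-line vm_map_copy_t descriptors.  MIG maps them into the caller's
 * address space, so a user-space consumer (the zprint tool, for example)
 * is expected to vm_deallocate() both arrays when it is done with them;
 * that user-side obligation is an illustration, not something this file
 * enforces.
 */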
3277
3278 /*
3279 * host_zone_info - LEGACY user interface for Mach zone information
3280 * Should use mach_zone_info() instead!
3281 */
3282 kern_return_t
3283 host_zone_info(
3284 host_priv_t host,
3285 zone_name_array_t *namesp,
3286 mach_msg_type_number_t *namesCntp,
3287 zone_info_array_t *infop,
3288 mach_msg_type_number_t *infoCntp)
3289 {
3290 zone_name_t *names;
3291 vm_offset_t names_addr;
3292 vm_size_t names_size;
3293 zone_info_t *info;
3294 vm_offset_t info_addr;
3295 vm_size_t info_size;
3296 unsigned int max_zones, i;
3297 zone_t z;
3298 zone_name_t *zn;
3299 zone_info_t *zi;
3300 kern_return_t kr;
3301
3302 vm_size_t used;
3303 vm_map_copy_t copy;
3304
3305
3306 if (host == HOST_NULL)
3307 return KERN_INVALID_HOST;
3308 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
3309 if (!PE_i_can_has_debugger(NULL))
3310 return KERN_INVALID_HOST;
3311 #endif
3312
3313 #if defined(__LP64__)
3314 if (!thread_is_64bit(current_thread()))
3315 return KERN_NOT_SUPPORTED;
3316 #else
3317 if (thread_is_64bit(current_thread()))
3318 return KERN_NOT_SUPPORTED;
3319 #endif
3320
3321 /*
3322 * We assume that zones aren't freed once allocated.
3323 * We won't pick up any zones that are allocated later.
3324 */
3325
3326 simple_lock(&all_zones_lock);
3327 max_zones = (unsigned int)(num_zones + num_fake_zones);
3328 z = first_zone;
3329 simple_unlock(&all_zones_lock);
3330
3331 names_size = round_page(max_zones * sizeof *names);
3332 kr = kmem_alloc_pageable(ipc_kernel_map,
3333 &names_addr, names_size);
3334 if (kr != KERN_SUCCESS)
3335 return kr;
3336 names = (zone_name_t *) names_addr;
3337
3338 info_size = round_page(max_zones * sizeof *info);
3339 kr = kmem_alloc_pageable(ipc_kernel_map,
3340 &info_addr, info_size);
3341 if (kr != KERN_SUCCESS) {
3342 kmem_free(ipc_kernel_map,
3343 names_addr, names_size);
3344 return kr;
3345 }
3346
3347 info = (zone_info_t *) info_addr;
3348
3349 zn = &names[0];
3350 zi = &info[0];
3351
3352 for (i = 0; i < max_zones - num_fake_zones; i++) {
3353 struct zone zcopy;
3354
3355 assert(z != ZONE_NULL);
3356
3357 lock_zone(z);
3358 zcopy = *z;
3359 unlock_zone(z);
3360
3361 simple_lock(&all_zones_lock);
3362 z = z->next_zone;
3363 simple_unlock(&all_zones_lock);
3364
3365 /* assuming here the name data is static */
3366 (void) strncpy(zn->zn_name, zcopy.zone_name,
3367 sizeof zn->zn_name);
3368 zn->zn_name[sizeof zn->zn_name - 1] = '\0';
3369
3370 zi->zi_count = zcopy.count;
3371 zi->zi_cur_size = zcopy.cur_size;
3372 zi->zi_max_size = zcopy.max_size;
3373 zi->zi_elem_size = zcopy.elem_size;
3374 zi->zi_alloc_size = zcopy.alloc_size;
3375 zi->zi_exhaustible = zcopy.exhaustible;
3376 zi->zi_collectable = zcopy.collectable;
3377
3378 zn++;
3379 zi++;
3380 }
3381
3382 /*
3383 * loop through the fake zones and fill them using the specialized
3384 * functions
3385 */
3386 for (i = 0; i < num_fake_zones; i++) {
3387 int caller_acct;
3388 uint64_t sum_space;
3389 strncpy(zn->zn_name, fake_zones[i].name, sizeof zn->zn_name);
3390 zn->zn_name[sizeof zn->zn_name - 1] = '\0';
3391 fake_zones[i].query(&zi->zi_count, &zi->zi_cur_size,
3392 &zi->zi_max_size, &zi->zi_elem_size,
3393 &zi->zi_alloc_size, &sum_space,
3394 &zi->zi_collectable, &zi->zi_exhaustible, &caller_acct);
3395 zn++;
3396 zi++;
3397 }
3398
3399 used = max_zones * sizeof *names;
3400 if (used != names_size)
3401 bzero((char *) (names_addr + used), names_size - used);
3402
3403 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
3404 (vm_map_size_t)names_size, TRUE, &copy);
3405 assert(kr == KERN_SUCCESS);
3406
3407 *namesp = (zone_name_t *) copy;
3408 *namesCntp = max_zones;
3409
3410 used = max_zones * sizeof *info;
3411 if (used != info_size)
3412 bzero((char *) (info_addr + used), info_size - used);
3413
3414 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
3415 (vm_map_size_t)info_size, TRUE, &copy);
3416 assert(kr == KERN_SUCCESS);
3417
3418 *infop = (zone_info_t *) copy;
3419 *infoCntp = max_zones;
3420
3421 return KERN_SUCCESS;
3422 }
3423
3424 kern_return_t
3425 mach_zone_force_gc(
3426 host_t host)
3427 {
3428
3429 if (host == HOST_NULL)
3430 return KERN_INVALID_HOST;
3431
3432 consider_zone_gc(TRUE);
3433
3434 return (KERN_SUCCESS);
3435 }
3436
3437 extern unsigned int stack_total;
3438 extern unsigned long long stack_allocs;
3439
3440 #if defined(__i386__) || defined (__x86_64__)
3441 extern unsigned int inuse_ptepages_count;
3442 extern long long alloc_ptepages_count;
3443 #endif
3444
3445 void zone_display_zprint()
3446 {
3447 unsigned int i;
3448 zone_t the_zone;
3449
3450 if(first_zone!=NULL) {
3451 the_zone = first_zone;
3452 for (i = 0; i < num_zones; i++) {
3453 if(the_zone->cur_size > (1024*1024)) {
3454 printf("%.20s:\t%lu\n",the_zone->zone_name,(uintptr_t)the_zone->cur_size);
3455 }
3456
3457 if(the_zone->next_zone == NULL) {
3458 break;
3459 }
3460
3461 the_zone = the_zone->next_zone;
3462 }
3463 }
3464
3465 printf("Kernel Stacks:\t%lu\n",(uintptr_t)(kernel_stack_size * stack_total));
3466
3467 #if defined(__i386__) || defined (__x86_64__)
3468 printf("PageTables:\t%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
3469 #endif
3470
3471 printf("Kalloc.Large:\t%lu\n",(uintptr_t)kalloc_large_total);
3472 }
3473
3474 #if ZONE_DEBUG
3475
3476 /* should we care about locks here ? */
3477
3478 #define zone_in_use(z) ( z->count || z->free_elements )
3479
3480 void
3481 zone_debug_enable(
3482 zone_t z)
3483 {
3484 if (zone_debug_enabled(z) || zone_in_use(z) ||
3485 z->alloc_size < (z->elem_size + ZONE_DEBUG_OFFSET))
3486 return;
3487 queue_init(&z->active_zones);
3488 z->elem_size += ZONE_DEBUG_OFFSET;
3489 }
3490
3491 void
3492 zone_debug_disable(
3493 zone_t z)
3494 {
3495 if (!zone_debug_enabled(z) || zone_in_use(z))
3496 return;
3497 z->elem_size -= ZONE_DEBUG_OFFSET;
3498 z->active_zones.next = z->active_zones.prev = NULL;
3499 }
3500
3501
3502 #endif /* ZONE_DEBUG */