1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: kern/zalloc.c
57 * Author: Avadis Tevanian, Jr.
58 *
59 * Zone-based memory allocator. A zone is a collection of fixed size
60 * data blocks for which quick allocation/deallocation is possible.
61 */
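/*
 * Illustrative usage sketch: a typical client creates a zone once with
 * zinit() and then allocates and frees fixed-size elements with
 * zalloc()/zfree().  The "widget" names below are hypothetical.
 */
#if 0	/* example only -- not compiled */
struct widget { int w_id; void *w_data; };
static zone_t widget_zone;

void
widget_init(void)
{
	widget_zone = zinit(sizeof (struct widget),		/* element size */
			1024 * sizeof (struct widget),		/* max memory */
			PAGE_SIZE,				/* allocation chunk */
			"widgets");
}

struct widget *
widget_alloc(void)
{
	return (struct widget *) zalloc(widget_zone);	/* may block */
}

void
widget_free(struct widget *w)
{
	zfree(widget_zone, (vm_offset_t) w);
}
#endif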
62 #include <zone_debug.h>
63 #include <norma_vm.h>
64 #include <mach_kdb.h>
65 #include <kern/ast.h>
66 #include <kern/assert.h>
67 #include <kern/macro_help.h>
68 #include <kern/sched.h>
69 #include <kern/lock.h>
70 #include <kern/sched_prim.h>
71 #include <kern/misc_protos.h>
72 #include <kern/thread_call.h>
73 #include <kern/zalloc.h>
74 #include <mach/vm_param.h>
75 #include <vm/vm_kern.h>
76 #include <machine/machparam.h>
77
78
79 #if MACH_ASSERT
80 /* Detect use of zone elt after freeing it by two methods:
81 * (1) Range-check the free-list "next" ptr for sanity.
82 * (2) Store the ptr in two different words, and compare them against
83 * each other when re-using the zone elt, to detect modifications.
84 */
85
86 #if defined(__alpha)
87
88 #define is_kernel_data_addr(a) \
89 (!(a) || IS_SYS_VA(a) && !((a) & (sizeof(long)-1)))
90
91 #else /* !defined(__alpha) */
92
93 #define is_kernel_data_addr(a) \
94 (!(a) || (a) >= VM_MIN_KERNEL_ADDRESS && !((a) & 0x3))
95
96 #endif /* defined(__alpha) */
97
98 /* Should we set all words of the zone element to an illegal address
99 * when it is freed, to help catch usage after freeing? The down-side
100 * is that this obscures the identity of the freed element.
101 */
102 boolean_t zfree_clear = FALSE;
103
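/*
 * Note on the free-list encoding used by the macros below: a free element's
 * own storage holds the link -- word 0 of the element is overwritten with
 * the address of the next free element, and zone->free_elements points at
 * the head -- so maintaining the free list costs no extra memory.
 */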
104 #define ADD_TO_ZONE(zone, element) \
105 MACRO_BEGIN \
106 if (zfree_clear) \
107 { int i; \
108 for (i=1; \
109 i < zone->elem_size/sizeof(vm_offset_t) - 1; \
110 i++) \
111 ((vm_offset_t *)(element))[i] = 0xdeadbeef; \
112 } \
113 ((vm_offset_t *)(element))[0] = (zone)->free_elements; \
114 (zone)->free_elements = (vm_offset_t) (element); \
115 (zone)->count--; \
116 MACRO_END
117
118 #define REMOVE_FROM_ZONE(zone, ret, type) \
119 MACRO_BEGIN \
120 (ret) = (type) (zone)->free_elements; \
121 if ((ret) != (type) 0) { \
122 if (!is_kernel_data_addr(((vm_offset_t *)(ret))[0])) { \
123 panic("A freed zone element has been modified.\n"); \
124 } \
125 (zone)->count++; \
126 (zone)->free_elements = *((vm_offset_t *)(ret)); \
127 } \
128 MACRO_END
129 #else /* MACH_ASSERT */
130
131 #define ADD_TO_ZONE(zone, element) \
132 MACRO_BEGIN \
133 *((vm_offset_t *)(element)) = (zone)->free_elements; \
134 (zone)->free_elements = (vm_offset_t) (element); \
135 (zone)->count--; \
136 MACRO_END
137
138 #define REMOVE_FROM_ZONE(zone, ret, type) \
139 MACRO_BEGIN \
140 (ret) = (type) (zone)->free_elements; \
141 if ((ret) != (type) 0) { \
142 (zone)->count++; \
143 (zone)->free_elements = *((vm_offset_t *)(ret)); \
144 } \
145 MACRO_END
146
147 #endif /* MACH_ASSERT */
148
149 #if ZONE_DEBUG
150 #define zone_debug_enabled(z) z->active_zones.next
151 #define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y))
152 #define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16)
153 #endif /* ZONE_DEBUG */
154
155 /*
156 * Support for garbage collection of unused zone pages:
157 */
158
159 struct zone_page_table_entry {
160 struct zone_page_table_entry *link;
161 short alloc_count;
162 short collect_count;
163 };
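/*
 * Rough meaning of the per-page counts, as used by zone_gc() below:
 * alloc_count is the number of zone elements resident on the page
 * (ZONE_PAGE_UNUSED when the page is not in use) and collect_count is the
 * number of those elements currently sitting on a free list; a page is
 * reclaimable when the two are equal.
 */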
164
165 /* Forwards */
166 void zone_page_init(
167 vm_offset_t addr,
168 vm_size_t size,
169 int value);
170
171 void zone_page_alloc(
172 vm_offset_t addr,
173 vm_size_t size);
174
175 void zone_page_free_element(
176 struct zone_page_table_entry **free_pages,
177 vm_offset_t addr,
178 vm_size_t size);
179
180 void zone_page_collect(
181 vm_offset_t addr,
182 vm_size_t size);
183
184 boolean_t zone_page_collectable(
185 vm_offset_t addr,
186 vm_size_t size);
187
188 void zone_page_keep(
189 vm_offset_t addr,
190 vm_size_t size);
191
192 void zalloc_async(
193 thread_call_param_t p0,
194 thread_call_param_t p1);
195
196
197 #if ZONE_DEBUG && MACH_KDB
198 int zone_count(
199 zone_t z,
200 int tail);
201 #endif /* ZONE_DEBUG && MACH_KDB */
202
203 vm_map_t zone_map = VM_MAP_NULL;
204
205 zone_t zone_zone = ZONE_NULL; /* the zone containing other zones */
206
207 /*
208 * The VM system gives us an initial chunk of memory.
209 * It has to be big enough to allocate the zone_zone
210 */
211
212 vm_offset_t zdata;
213 vm_size_t zdata_size;
214
215 #define lock_zone(zone) \
216 MACRO_BEGIN \
217 simple_lock(&(zone)->lock); \
218 MACRO_END
219
220 #define unlock_zone(zone) \
221 MACRO_BEGIN \
222 simple_unlock(&(zone)->lock); \
223 MACRO_END
224
225 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
226 #define zone_sleep(zone) \
227 thread_sleep_simple_lock((event_t)(zone), \
228 &(zone)->lock, \
229 THREAD_UNINT)
230
231 #define lock_zone_init(zone) \
232 MACRO_BEGIN \
233 simple_lock_init(&zone->lock, ETAP_MISC_ZONE); \
234 MACRO_END
235
236 #define lock_try_zone(zone) simple_lock_try(&zone->lock)
237
238 kern_return_t zget_space(
239 vm_offset_t size,
240 vm_offset_t *result);
241
242 decl_simple_lock_data(,zget_space_lock)
243 vm_offset_t zalloc_next_space;
244 vm_offset_t zalloc_end_of_space;
245 vm_size_t zalloc_wasted_space;
246
247 /*
248 * Garbage collection map information
249 */
250 struct zone_page_table_entry * zone_page_table;
251 vm_offset_t zone_map_min_address;
252 vm_offset_t zone_map_max_address;
253 integer_t zone_pages;
254
255 /*
256 * Exclude more than one concurrent garbage collection
257 */
258 decl_mutex_data(, zone_gc_lock)
259
260 #define from_zone_map(addr, size) \
261 ((vm_offset_t)(addr) >= zone_map_min_address && \
262 ((vm_offset_t)(addr) + size -1) < zone_map_max_address)
263
264 #define ZONE_PAGE_USED 0
265 #define ZONE_PAGE_UNUSED -1
266
267
268 /*
269 * Protects first_zone, last_zone, num_zones,
270 * and the next_zone field of zones.
271 */
272 decl_simple_lock_data(, all_zones_lock)
273 zone_t first_zone;
274 zone_t *last_zone;
275 int num_zones;
276
277 boolean_t zone_gc_allowed = TRUE;
278 boolean_t zone_gc_forced = FALSE;
279 unsigned zone_gc_last_tick = 0;
280 unsigned zone_gc_max_rate = 0; /* in ticks */
281
282
283 /*
284 * zinit initializes a new zone. The zone data structures themselves
285 * are stored in a zone, which is initially a static structure that
286 * is initialized by zone_init.
287 */
288 zone_t
289 zinit(
290 vm_size_t size, /* the size of an element */
291 vm_size_t max, /* maximum memory to use */
292 vm_size_t alloc, /* allocation size */
293 char *name) /* a name for the zone */
294 {
295 zone_t z;
296
297 if (zone_zone == ZONE_NULL) {
298 if (zget_space(sizeof(struct zone), (vm_offset_t *)&z)
299 != KERN_SUCCESS)
300 return(ZONE_NULL);
301 } else
302 z = (zone_t) zalloc(zone_zone);
303 if (z == ZONE_NULL)
304 return(ZONE_NULL);
305
306 /*
307 * Round off all the parameters appropriately.
308 */
309 if (size < sizeof(z->free_elements))
310 size = sizeof(z->free_elements);
311 size = ((size-1) + sizeof(z->free_elements)) -
312 ((size-1) % sizeof(z->free_elements));
313 if (alloc == 0)
314 alloc = PAGE_SIZE;
315 alloc = round_page_32(alloc);
316 max = round_page_32(max);
317 /*
318 * We look for an allocation size with least fragmentation
319 * in the range of 1 - 5 pages. This size will be used unless
320 * the user suggestion is larger AND has less fragmentation
321 */
322 { vm_size_t best, waste; unsigned int i;
323 best = PAGE_SIZE;
324 waste = best % size;
325 for (i = 2; i <= 5; i++){ vm_size_t tsize, twaste;
326 tsize = i * PAGE_SIZE;
327 twaste = tsize % size;
328 if (twaste < waste)
329 best = tsize, waste = twaste;
330 }
331 if (alloc <= best || (alloc % size >= waste))
332 alloc = best;
333 }
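/*
 * Worked example of the search above, assuming a hypothetical 4K page and
 * a 192-byte element: one page wastes 64 bytes, two pages waste 128,
 * three pages waste 0, so best = 3 pages; a caller-supplied alloc of one
 * page would then be replaced by the three-page chunk size.
 */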
334 if (max && (max < alloc))
335 max = alloc;
336
337 z->free_elements = 0;
338 z->cur_size = 0;
339 z->max_size = max;
340 z->elem_size = size;
341 z->alloc_size = alloc;
342 z->zone_name = name;
343 z->count = 0;
344 z->doing_alloc = FALSE;
345 z->exhaustible = FALSE;
346 z->collectable = TRUE;
347 z->allows_foreign = FALSE;
348 z->expandable = TRUE;
349 z->waiting = FALSE;
350 z->async_pending = FALSE;
351
352 #if ZONE_DEBUG
353 z->active_zones.next = z->active_zones.prev = 0;
354 zone_debug_enable(z);
355 #endif /* ZONE_DEBUG */
356 lock_zone_init(z);
357
358 /*
359 * Add the zone to the all-zones list.
360 */
361
362 z->next_zone = ZONE_NULL;
363 thread_call_setup(&z->call_async_alloc, zalloc_async, z);
364 simple_lock(&all_zones_lock);
365 *last_zone = z;
366 last_zone = &z->next_zone;
367 num_zones++;
368 simple_unlock(&all_zones_lock);
369
370 return(z);
371 }
372
373 /*
374 * Cram the given memory into the specified zone.
375 */
376 void
377 zcram(
378 register zone_t zone,
379 vm_offset_t newmem,
380 vm_size_t size)
381 {
382 register vm_size_t elem_size;
383
384 /* Basic sanity checks */
385 assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
386 assert(!zone->collectable || zone->allows_foreign
387 || (from_zone_map(newmem, size)));
388
389 elem_size = zone->elem_size;
390
391 lock_zone(zone);
392 while (size >= elem_size) {
393 ADD_TO_ZONE(zone, newmem);
394 if (from_zone_map(newmem, elem_size))
395 zone_page_alloc(newmem, elem_size);
396 zone->count++; /* compensate for ADD_TO_ZONE */
397 size -= elem_size;
398 newmem += elem_size;
399 zone->cur_size += elem_size;
400 }
401 unlock_zone(zone);
402 }
403
404 /*
405 * Contiguous space allocator for non-paged zones. Allocates "size" amount
406 * of memory from zone_map.
407 */
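/*
 * In outline (a summary of the routine below): requests are packed end to
 * end out of a chunk that is grown a page at a time with
 * kernel_memory_allocate(); if a freshly wired chunk does not land
 * contiguously after the old one, the leftover tail of the old chunk is
 * abandoned and accounted for in zalloc_wasted_space.
 */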
408
409 kern_return_t
410 zget_space(
411 vm_offset_t size,
412 vm_offset_t *result)
413 {
414 vm_offset_t new_space = 0;
415 vm_size_t space_to_add;
416
417 simple_lock(&zget_space_lock);
418 while ((zalloc_next_space + size) > zalloc_end_of_space) {
419 /*
420 * Add at least one page to allocation area.
421 */
422
423 space_to_add = round_page_32(size);
424
425 if (new_space == 0) {
426 kern_return_t retval;
427 /*
428 * Memory cannot be wired down while holding
429 * any locks that the pageout daemon might
430 * need to free up pages. [Making the zget_space
431 * lock a complex lock does not help in this
432 * regard.]
433 *
434 * Unlock and allocate memory. Because several
435 * threads might try to do this at once, don't
436 * use the memory before checking for available
437 * space again.
438 */
439
440 simple_unlock(&zget_space_lock);
441
442 retval = kernel_memory_allocate(zone_map, &new_space,
443 space_to_add, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);
444 if (retval != KERN_SUCCESS)
445 return(retval);
446 zone_page_init(new_space, space_to_add,
447 ZONE_PAGE_USED);
448 simple_lock(&zget_space_lock);
449 continue;
450 }
451
452
453 /*
454 * Memory was allocated in a previous iteration.
455 *
456 * Check whether the new region is contiguous
457 * with the old one.
458 */
459
460 if (new_space != zalloc_end_of_space) {
461 /*
462 * Throw away the remainder of the
463 * old space, and start a new one.
464 */
465 zalloc_wasted_space +=
466 zalloc_end_of_space - zalloc_next_space;
467 zalloc_next_space = new_space;
468 }
469
470 zalloc_end_of_space = new_space + space_to_add;
471
472 new_space = 0;
473 }
474 *result = zalloc_next_space;
475 zalloc_next_space += size;
476 simple_unlock(&zget_space_lock);
477
478 if (new_space != 0)
479 kmem_free(zone_map, new_space, space_to_add);
480
481 return(KERN_SUCCESS);
482 }
483
484
485 /*
486 * Steal memory for the zone package. Called from
487 * vm_page_bootstrap().
488 */
489 void
490 zone_steal_memory(void)
491 {
492 zdata_size = round_page_32(128*sizeof(struct zone));
493 zdata = pmap_steal_memory(zdata_size);
494 }
495
496
497 /*
498 * Fill a zone with enough memory to contain at least nelem elements.
499 * Memory is obtained with kmem_alloc_wired from the kernel_map.
500 * Return the number of elements actually put into the zone, which may
501 * be more than the caller asked for since the memory allocation is
502 * rounded up to a full page.
503 */
504 int
505 zfill(
506 zone_t zone,
507 int nelem)
508 {
509 kern_return_t kr;
510 vm_size_t size;
511 vm_offset_t memory;
512 int nalloc;
513
514 assert(nelem > 0);
515 if (nelem <= 0)
516 return 0;
517 size = nelem * zone->elem_size;
518 size = round_page_32(size);
519 kr = kmem_alloc_wired(kernel_map, &memory, size);
520 if (kr != KERN_SUCCESS)
521 return 0;
522
523 zone_change(zone, Z_FOREIGN, TRUE);
524 zcram(zone, memory, size);
525 nalloc = size / zone->elem_size;
526 assert(nalloc >= nelem);
527
528 return nalloc;
529 }
530
531 /*
532 * Initialize the "zone of zones" which uses fixed memory allocated
533 * earlier in memory initialization. zone_bootstrap is called
534 * before zone_init.
535 */
536 void
537 zone_bootstrap(void)
538 {
539 vm_size_t zone_zone_size;
540 vm_offset_t zone_zone_space;
541
542 simple_lock_init(&all_zones_lock, ETAP_MISC_ZONE_ALL);
543
544 first_zone = ZONE_NULL;
545 last_zone = &first_zone;
546 num_zones = 0;
547
548 simple_lock_init(&zget_space_lock, ETAP_MISC_ZONE_GET);
549 zalloc_next_space = zdata;
550 zalloc_end_of_space = zdata + zdata_size;
551 zalloc_wasted_space = 0;
552
553 /* assertion: nobody else called zinit before us */
554 assert(zone_zone == ZONE_NULL);
555 zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
556 sizeof(struct zone), "zones");
557 zone_change(zone_zone, Z_COLLECT, FALSE);
558 zone_zone_size = zalloc_end_of_space - zalloc_next_space;
559 zget_space(zone_zone_size, &zone_zone_space);
560 zcram(zone_zone, zone_zone_space, zone_zone_size);
561 }
562
563 void
564 zone_init(
565 vm_size_t max_zonemap_size)
566 {
567 kern_return_t retval;
568 vm_offset_t zone_min;
569 vm_offset_t zone_max;
570 vm_size_t zone_table_size;
571
572 retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
573 FALSE, TRUE, &zone_map);
574 if (retval != KERN_SUCCESS)
575 panic("zone_init: kmem_suballoc failed");
576 zone_max = zone_min + round_page_32(max_zonemap_size);
577 /*
578 * Setup garbage collection information:
579 */
580 zone_table_size = atop_32(zone_max - zone_min) *
581 sizeof(struct zone_page_table_entry);
582 if (kmem_alloc_wired(zone_map, (vm_offset_t *) &zone_page_table,
583 zone_table_size) != KERN_SUCCESS)
584 panic("zone_init");
585 zone_min = (vm_offset_t)zone_page_table + round_page_32(zone_table_size);
586 zone_pages = atop_32(zone_max - zone_min);
587 zone_map_min_address = zone_min;
588 zone_map_max_address = zone_max;
589 mutex_init(&zone_gc_lock, ETAP_NO_TRACE);
590 zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
591 }
592
593
594 /*
595 * zalloc returns an element from the specified zone.
596 */
597 vm_offset_t
598 zalloc_canblock(
599 register zone_t zone,
600 boolean_t canblock)
601 {
602 vm_offset_t addr;
603 kern_return_t retval;
604
605 assert(zone != ZONE_NULL);
606 check_simple_locks();
607
608 lock_zone(zone);
609
610 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
611
612 while ((addr == 0) && canblock) {
613 /*
614 * If nothing was there, try to get more
615 */
616 if (zone->doing_alloc) {
617 /*
618 * Someone is allocating memory for this zone.
619 * Wait for it to show up, then try again.
620 */
621 zone->waiting = TRUE;
622 zone_sleep(zone);
623 }
624 else {
625 if ((zone->cur_size + zone->elem_size) >
626 zone->max_size) {
627 if (zone->exhaustible)
628 break;
629 if (zone->expandable) {
630 /*
631 * We're willing to overflow certain
632 * zones, but not without complaining.
633 *
634 * This is best used in conjunction
635 * with the collectable flag. What we
636 * want is an assurance we can get the
637 * memory back, assuming there's no
638 * leak.
639 */
640 zone->max_size += (zone->max_size >> 1);
641 } else {
642 unlock_zone(zone);
643
644 panic("zalloc: zone \"%s\" empty.", zone->zone_name);
645 }
646 }
647 zone->doing_alloc = TRUE;
648 unlock_zone(zone);
649
650 if (zone->collectable) {
651 vm_offset_t space;
652 vm_size_t alloc_size;
653 boolean_t retry = FALSE;
654
655 for (;;) {
656
657 if (vm_pool_low() || retry == TRUE)
658 alloc_size =
659 round_page_32(zone->elem_size);
660 else
661 alloc_size = zone->alloc_size;
662
663 retval = kernel_memory_allocate(zone_map,
664 &space, alloc_size, 0,
665 KMA_KOBJECT|KMA_NOPAGEWAIT);
666 if (retval == KERN_SUCCESS) {
667 zone_page_init(space, alloc_size,
668 ZONE_PAGE_USED);
669 zcram(zone, space, alloc_size);
670
671 break;
672 } else if (retval != KERN_RESOURCE_SHORTAGE) {
673 /* would like to cause a zone_gc() */
674 if (retry == TRUE)
675 panic("zalloc");
676 retry = TRUE;
677 }
678 }
679 lock_zone(zone);
680 zone->doing_alloc = FALSE;
681 if (zone->waiting) {
682 zone->waiting = FALSE;
683 zone_wakeup(zone);
684 }
685 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
686 if (addr == 0 &&
687 retval == KERN_RESOURCE_SHORTAGE) {
688 unlock_zone(zone);
689
690 VM_PAGE_WAIT();
691 lock_zone(zone);
692 }
693 } else {
694 vm_offset_t space;
695 retval = zget_space(zone->elem_size, &space);
696
697 lock_zone(zone);
698 zone->doing_alloc = FALSE;
699 if (zone->waiting) {
700 zone->waiting = FALSE;
701 thread_wakeup((event_t)zone);
702 }
703 if (retval == KERN_SUCCESS) {
704 zone->count++;
705 zone->cur_size += zone->elem_size;
706 #if ZONE_DEBUG
707 if (zone_debug_enabled(zone)) {
708 enqueue_tail(&zone->active_zones, (queue_entry_t)space);
709 }
710 #endif
711 unlock_zone(zone);
712 zone_page_alloc(space, zone->elem_size);
713 #if ZONE_DEBUG
714 if (zone_debug_enabled(zone))
715 space += ZONE_DEBUG_OFFSET;
716 #endif
717 return(space);
718 }
719 if (retval == KERN_RESOURCE_SHORTAGE) {
720 unlock_zone(zone);
721
722 VM_PAGE_WAIT();
723 lock_zone(zone);
724 } else {
725 panic("zalloc");
726 }
727 }
728 }
729 if (addr == 0)
730 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
731 }
732
733 if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (!vm_pool_low())) {
734 zone->async_pending = TRUE;
735 unlock_zone(zone);
736 thread_call_enter(&zone->call_async_alloc);
737 lock_zone(zone);
738 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
739 }
740
741 #if ZONE_DEBUG
742 if (addr && zone_debug_enabled(zone)) {
743 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
744 addr += ZONE_DEBUG_OFFSET;
745 }
746 #endif
747
748 unlock_zone(zone);
749
750 return(addr);
751 }
752
753
754 vm_offset_t
755 zalloc(
756 register zone_t zone)
757 {
758 return( zalloc_canblock(zone, TRUE) );
759 }
760
761 vm_offset_t
762 zalloc_noblock(
763 register zone_t zone)
764 {
765 return( zalloc_canblock(zone, FALSE) );
766 }
767
768 void
769 zalloc_async(
770 thread_call_param_t p0,
771 thread_call_param_t p1)
772 {
773 vm_offset_t elt;
774
775 elt = zalloc_canblock((zone_t)p0, TRUE);
776 zfree((zone_t)p0, elt);
777 lock_zone(((zone_t)p0));
778 ((zone_t)p0)->async_pending = FALSE;
779 unlock_zone(((zone_t)p0));
780 }
781
782
783 /*
784 * zget returns an element from the specified zone
785 * and returns zero immediately if none is available.
786 *
787 * This form should be used when you cannot block (such as when
788 * processing an interrupt).
789 */
790 vm_offset_t
791 zget(
792 register zone_t zone)
793 {
794 register vm_offset_t addr;
795
796 assert( zone != ZONE_NULL );
797
798 if (!lock_try_zone(zone))
799 return ((vm_offset_t)0);
800
801 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
802 #if ZONE_DEBUG
803 if (addr && zone_debug_enabled(zone)) {
804 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
805 addr += ZONE_DEBUG_OFFSET;
806 }
807 #endif /* ZONE_DEBUG */
808 unlock_zone(zone);
809
810 return(addr);
811 }
812
813 /* Keep this FALSE by default.  Large memory machines run orders of magnitude
814 slower in debug mode when TRUE.  Use the debugger to enable if needed. */
815 /* static */ boolean_t zone_check = FALSE;
816
817 static zone_t zone_last_bogus_zone = ZONE_NULL;
818 static vm_offset_t zone_last_bogus_elem = 0;
819
820 void
821 zfree(
822 register zone_t zone,
823 vm_offset_t elem)
824 {
825
826 #if MACH_ASSERT
827 /* Basic sanity checks */
828 if (zone == ZONE_NULL || elem == (vm_offset_t)0)
829 panic("zfree: NULL");
830 /* zone_gc assumes zones are never freed */
831 if (zone == zone_zone)
832 panic("zfree: freeing to zone_zone breaks zone_gc!");
833 #endif
834
835 if (zone->collectable && !zone->allows_foreign &&
836 !from_zone_map(elem, zone->elem_size)) {
837 #if MACH_ASSERT
838 panic("zfree: non-allocated memory in collectable zone!");
839 #else
840 zone_last_bogus_zone = zone;
841 zone_last_bogus_elem = elem;
842 return;
843 #endif
844 }
845
846 lock_zone(zone);
847 #if ZONE_DEBUG
848 if (zone_debug_enabled(zone)) {
849 queue_t tmp_elem;
850
851 elem -= ZONE_DEBUG_OFFSET;
852 if (zone_check) {
853 /* check the zone's consistency */
854
855 for (tmp_elem = queue_first(&zone->active_zones);
856 !queue_end(tmp_elem, &zone->active_zones);
857 tmp_elem = queue_next(tmp_elem))
858 if (elem == (vm_offset_t)tmp_elem)
859 break;
860 if (elem != (vm_offset_t)tmp_elem)
861 panic("zfree()ing element from wrong zone");
862 }
863 remqueue(&zone->active_zones, (queue_t) elem);
864 }
865 #endif /* ZONE_DEBUG */
866 if (zone_check) {
867 vm_offset_t this;
868
869 /* check the zone's consistency */
870
871 for (this = zone->free_elements;
872 this != 0;
873 this = * (vm_offset_t *) this)
874 if (!pmap_kernel_va(this) || this == elem)
875 panic("zfree");
876 }
877 ADD_TO_ZONE(zone, elem);
878
879 /*
880 * If elements are at least a page in size and memory is low,
881 * request that the zone garbage collector run the next
882 * time the pageout thread runs.
883 */
884 if (zone->elem_size >= PAGE_SIZE &&
885 vm_pool_low()){
886 zone_gc_forced = TRUE;
887 }
888 unlock_zone(zone);
889 }
890
891
892 /* Change a zone's flags.
893 * This routine must be called immediately after zinit.
894 */
895 void
896 zone_change(
897 zone_t zone,
898 unsigned int item,
899 boolean_t value)
900 {
901 assert( zone != ZONE_NULL );
902 assert( value == TRUE || value == FALSE );
903
904 switch(item){
905 case Z_EXHAUST:
906 zone->exhaustible = value;
907 break;
908 case Z_COLLECT:
909 zone->collectable = value;
910 break;
911 case Z_EXPAND:
912 zone->expandable = value;
913 break;
914 case Z_FOREIGN:
915 zone->allows_foreign = value;
916 break;
917 #if MACH_ASSERT
918 default:
919 panic("Zone_change: Wrong Item Type!");
920 /* break; */
921 #endif
922 }
923 lock_zone_init(zone);
924 }
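/*
 * Illustrative call sequence (hypothetical zone, for flavor only):
 */
#if 0	/* example only -- not compiled */
	z = zinit(sizeof (struct thing), 16 * 1024 * 1024, PAGE_SIZE, "things");
	zone_change(z, Z_EXHAUST, TRUE);	/* fail instead of growing past max_size */
	zone_change(z, Z_COLLECT, FALSE);	/* exempt this zone from zone_gc() */
#endif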
925
926 /*
927 * Return the expected number of free elements in the zone.
928 * This calculation will be incorrect if items are zfree'd that
929 * were never zalloc'd/zget'd. The correct way to stuff memory
930 * into a zone is by zcram.
931 */
932
933 integer_t
934 zone_free_count(zone_t zone)
935 {
936 integer_t free_count;
937
938 lock_zone(zone);
939 free_count = zone->cur_size/zone->elem_size - zone->count;
940 unlock_zone(zone);
941
942 assert(free_count >= 0);
943
944 return(free_count);
945 }
946
947 /*
948 * zprealloc preallocates wired memory, expanding the specified
949 * zone to the specified size.
950 */
951 void
952 zprealloc(
953 zone_t zone,
954 vm_size_t size)
955 {
956 vm_offset_t addr;
957
958 if (size != 0) {
959 if (kmem_alloc_wired(zone_map, &addr, size) != KERN_SUCCESS)
960 panic("zprealloc");
961 zone_page_init(addr, size, ZONE_PAGE_USED);
962 zcram(zone, addr, size);
963 }
964 }
965
966 /*
967 * Zone garbage collection subroutines
968 */
969
970 boolean_t
971 zone_page_collectable(
972 vm_offset_t addr,
973 vm_size_t size)
974 {
975 struct zone_page_table_entry *zp;
976 natural_t i, j;
977
978 #if MACH_ASSERT
979 if (!from_zone_map(addr, size))
980 panic("zone_page_collectable");
981 #endif
982
983 i = atop_32(addr-zone_map_min_address);
984 j = atop_32((addr+size-1) - zone_map_min_address);
985
986 for (zp = zone_page_table + i; i <= j; zp++, i++)
987 if (zp->collect_count == zp->alloc_count)
988 return (TRUE);
989
990 return (FALSE);
991 }
992
993 void
994 zone_page_keep(
995 vm_offset_t addr,
996 vm_size_t size)
997 {
998 struct zone_page_table_entry *zp;
999 natural_t i, j;
1000
1001 #if MACH_ASSERT
1002 if (!from_zone_map(addr, size))
1003 panic("zone_page_keep");
1004 #endif
1005
1006 i = atop_32(addr-zone_map_min_address);
1007 j = atop_32((addr+size-1) - zone_map_min_address);
1008
1009 for (zp = zone_page_table + i; i <= j; zp++, i++)
1010 zp->collect_count = 0;
1011 }
1012
1013 void
1014 zone_page_collect(
1015 vm_offset_t addr,
1016 vm_size_t size)
1017 {
1018 struct zone_page_table_entry *zp;
1019 natural_t i, j;
1020
1021 #if MACH_ASSERT
1022 if (!from_zone_map(addr, size))
1023 panic("zone_page_collect");
1024 #endif
1025
1026 i = atop_32(addr-zone_map_min_address);
1027 j = atop_32((addr+size-1) - zone_map_min_address);
1028
1029 for (zp = zone_page_table + i; i <= j; zp++, i++)
1030 ++zp->collect_count;
1031 }
1032
1033 void
1034 zone_page_init(
1035 vm_offset_t addr,
1036 vm_size_t size,
1037 int value)
1038 {
1039 struct zone_page_table_entry *zp;
1040 natural_t i, j;
1041
1042 #if MACH_ASSERT
1043 if (!from_zone_map(addr, size))
1044 panic("zone_page_init");
1045 #endif
1046
1047 i = atop_32(addr-zone_map_min_address);
1048 j = atop_32((addr+size-1) - zone_map_min_address);
1049
1050 for (zp = zone_page_table + i; i <= j; zp++, i++) {
1051 zp->alloc_count = value;
1052 zp->collect_count = 0;
1053 }
1054 }
1055
1056 void
1057 zone_page_alloc(
1058 vm_offset_t addr,
1059 vm_size_t size)
1060 {
1061 struct zone_page_table_entry *zp;
1062 natural_t i, j;
1063
1064 #if MACH_ASSERT
1065 if (!from_zone_map(addr, size))
1066 panic("zone_page_alloc");
1067 #endif
1068
1069 i = atop_32(addr-zone_map_min_address);
1070 j = atop_32((addr+size-1) - zone_map_min_address);
1071
1072 for (zp = zone_page_table + i; i <= j; zp++, i++) {
1073 /*
1074 * Set alloc_count to (ZONE_PAGE_USED + 1) if
1075 * it was previously set to ZONE_PAGE_UNUSED.
1076 */
1077 if (zp->alloc_count == ZONE_PAGE_UNUSED)
1078 zp->alloc_count = 1;
1079 else
1080 ++zp->alloc_count;
1081 }
1082 }
1083
1084 void
1085 zone_page_free_element(
1086 struct zone_page_table_entry **free_pages,
1087 vm_offset_t addr,
1088 vm_size_t size)
1089 {
1090 struct zone_page_table_entry *zp;
1091 natural_t i, j;
1092
1093 #if MACH_ASSERT
1094 if (!from_zone_map(addr, size))
1095 panic("zone_page_free_element");
1096 #endif
1097
1098 i = atop_32(addr-zone_map_min_address);
1099 j = atop_32((addr+size-1) - zone_map_min_address);
1100
1101 for (zp = zone_page_table + i; i <= j; zp++, i++) {
1102 if (zp->collect_count > 0)
1103 --zp->collect_count;
1104 if (--zp->alloc_count == 0) {
1105 zp->alloc_count = ZONE_PAGE_UNUSED;
1106 zp->collect_count = 0;
1107
1108 zp->link = *free_pages;
1109 *free_pages = zp;
1110 }
1111 }
1112 }
1113
1114
1115 /* This is used for walking through a zone's free element list.
1116 */
1117 struct zone_free_element {
1118 struct zone_free_element * next;
1119 };
1120
1121 struct {
1122 uint32_t pgs_freed;
1123
1124 uint32_t elems_collected,
1125 elems_freed,
1126 elems_kept;
1127 } zgc_stats;
1128
1129 /* Zone garbage collection
1130 *
1131 * zone_gc will walk through all the free elements in all the
1132 * zones that are marked collectable looking for reclaimable
1133 * pages. zone_gc is called by consider_zone_gc when the system
1134 * begins to run out of memory.
1135 */
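/*
 * In outline (a summary of the two passes below): pass 1 detaches each
 * collectable zone's free list and bumps collect_count on every page a
 * free element touches, handing foreign elements straight back; pass 2
 * frees the elements whose pages became fully collectable (collect_count
 * == alloc_count) and returns the rest, and any page whose alloc_count
 * reaches zero is then handed back to zone_map via kmem_free().
 */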
1136 void
1137 zone_gc(void)
1138 {
1139 unsigned int max_zones;
1140 zone_t z;
1141 unsigned int i;
1142 struct zone_page_table_entry *zp, *zone_free_pages;
1143
1144 mutex_lock(&zone_gc_lock);
1145
1146 simple_lock(&all_zones_lock);
1147 max_zones = num_zones;
1148 z = first_zone;
1149 simple_unlock(&all_zones_lock);
1150
1151 #if MACH_ASSERT
1152 for (i = 0; i < zone_pages; i++)
1153 assert(zone_page_table[i].collect_count == 0);
1154 #endif /* MACH_ASSERT */
1155
1156 zone_free_pages = NULL;
1157
1158 for (i = 0; i < max_zones; i++, z = z->next_zone) {
1159 unsigned int n;
1160 vm_size_t elt_size, size_freed;
1161 struct zone_free_element *elt, *prev, *scan, *keep, *tail;
1162
1163 assert(z != ZONE_NULL);
1164
1165 if (!z->collectable)
1166 continue;
1167
1168 lock_zone(z);
1169
1170 elt_size = z->elem_size;
1171
1172 /*
1173 * Do a quick feasibility check before we scan the zone:
1174 * skip unless there is likelihood of getting 1+ pages back.
1175 */
1176 if (z->cur_size - z->count * elt_size <= 2 * PAGE_SIZE){
1177 unlock_zone(z);
1178 continue;
1179 }
1180
1181 /*
1182 * Snatch all of the free elements away from the zone.
1183 */
1184
1185 scan = (void *)z->free_elements;
1186 (void *)z->free_elements = NULL;
1187
1188 unlock_zone(z);
1189
1190 /*
1191 * Pass 1:
1192 *
1193 * Determine which elements we can attempt to collect
1194 * and count them up in the page table. Foreign elements
1195 * are returned to the zone.
1196 */
1197
1198 prev = (void *)&scan;
1199 elt = scan;
1200 n = 0; tail = keep = NULL;
1201 while (elt != NULL) {
1202 if (from_zone_map(elt, elt_size)) {
1203 zone_page_collect((vm_offset_t)elt, elt_size);
1204
1205 prev = elt;
1206 elt = elt->next;
1207
1208 ++zgc_stats.elems_collected;
1209 }
1210 else {
1211 if (keep == NULL)
1212 keep = tail = elt;
1213 else
1214 tail = tail->next = elt;
1215
1216 elt = prev->next = elt->next;
1217 tail->next = NULL;
1218 }
1219
1220 /*
1221 * Dribble back the elements we are keeping.
1222 */
1223
1224 if (++n >= 50 && keep != NULL) {
1225 lock_zone(z);
1226
1227 tail->next = (void *)z->free_elements;
1228 (void *)z->free_elements = keep;
1229
1230 unlock_zone(z);
1231
1232 n = 0; tail = keep = NULL;
1233 }
1234 }
1235
1236 /*
1237 * Return any remaining elements.
1238 */
1239
1240 if (keep != NULL) {
1241 lock_zone(z);
1242
1243 tail->next = (void *)z->free_elements;
1244 (void *)z->free_elements = keep;
1245
1246 unlock_zone(z);
1247 }
1248
1249 /*
1250 * Pass 2:
1251 *
1252 * Determine which pages we can reclaim and
1253 * free those elements.
1254 */
1255
1256 size_freed = 0;
1257 prev = (void *)&scan;
1258 elt = scan;
1259 n = 0; tail = keep = NULL;
1260 while (elt != NULL) {
1261 if (zone_page_collectable((vm_offset_t)elt, elt_size)) {
1262 size_freed += elt_size;
1263 zone_page_free_element(&zone_free_pages,
1264 (vm_offset_t)elt, elt_size);
1265
1266 elt = prev->next = elt->next;
1267
1268 ++zgc_stats.elems_freed;
1269 }
1270 else {
1271 zone_page_keep((vm_offset_t)elt, elt_size);
1272
1273 if (keep == NULL)
1274 keep = tail = elt;
1275 else
1276 tail = tail->next = elt;
1277
1278 elt = prev->next = elt->next;
1279 tail->next = NULL;
1280
1281 ++zgc_stats.elems_kept;
1282 }
1283
1284 /*
1285 * Dribble back the elements we are keeping,
1286 * and update the zone size info.
1287 */
1288
1289 if (++n >= 50 && keep != NULL) {
1290 lock_zone(z);
1291
1292 z->cur_size -= size_freed;
1293 size_freed = 0;
1294
1295 tail->next = (void *)z->free_elements;
1296 (void *)z->free_elements = keep;
1297
1298 unlock_zone(z);
1299
1300 n = 0; tail = keep = NULL;
1301 }
1302 }
1303
1304 /*
1305 * Return any remaining elements, and update
1306 * the zone size info.
1307 */
1308
1309 if (size_freed > 0 || keep != NULL) {
1310 lock_zone(z);
1311
1312 z->cur_size -= size_freed;
1313
1314 if (keep != NULL) {
1315 tail->next = (void *)z->free_elements;
1316 (void *)z->free_elements = keep;
1317 }
1318
1319 unlock_zone(z);
1320 }
1321 }
1322
1323 /*
1324 * Reclaim the pages we are freeing.
1325 */
1326
1327 while ((zp = zone_free_pages) != NULL) {
1328 zone_free_pages = zp->link;
1329 kmem_free(zone_map, zone_map_min_address + PAGE_SIZE *
1330 (zp - zone_page_table), PAGE_SIZE);
1331 ++zgc_stats.pgs_freed;
1332 }
1333
1334 mutex_unlock(&zone_gc_lock);
1335 }
1336
1337 /*
1338 * consider_zone_gc:
1339 *
1340 * Called by the pageout daemon when the system needs more free pages.
1341 */
1342
1343 void
1344 consider_zone_gc(void)
1345 {
1346 /*
1347 * By default, don't attempt zone GC more frequently
1348 * than once every 2 seconds.
1349 */
1350
1351 if (zone_gc_max_rate == 0)
1352 zone_gc_max_rate = (2 << SCHED_TICK_SHIFT) + 1;
1353
1354 if (zone_gc_allowed &&
1355 ((sched_tick > (zone_gc_last_tick + zone_gc_max_rate)) ||
1356 zone_gc_forced)) {
1357 zone_gc_forced = FALSE;
1358 zone_gc_last_tick = sched_tick;
1359 zone_gc();
1360 }
1361 }
1362
1363 #include <mach/kern_return.h>
1364 #include <mach/machine/vm_types.h>
1365 #include <mach_debug/zone_info.h>
1366 #include <kern/host.h>
1367 #include <vm/vm_map.h>
1368 #include <vm/vm_kern.h>
1369
1370 #include <mach/mach_host_server.h>
1371
1372 kern_return_t
1373 host_zone_info(
1374 host_t host,
1375 zone_name_array_t *namesp,
1376 mach_msg_type_number_t *namesCntp,
1377 zone_info_array_t *infop,
1378 mach_msg_type_number_t *infoCntp)
1379 {
1380 zone_name_t *names;
1381 vm_offset_t names_addr;
1382 vm_size_t names_size;
1383 zone_info_t *info;
1384 vm_offset_t info_addr;
1385 vm_size_t info_size;
1386 unsigned int max_zones, i;
1387 zone_t z;
1388 zone_name_t *zn;
1389 zone_info_t *zi;
1390 kern_return_t kr;
1391
1392 if (host == HOST_NULL)
1393 return KERN_INVALID_HOST;
1394
1395 /*
1396 * We assume that zones aren't freed once allocated.
1397 * We won't pick up any zones that are allocated later.
1398 */
1399
1400 simple_lock(&all_zones_lock);
1401 #ifdef ppc
1402 max_zones = num_zones + 4;
1403 #else
1404 max_zones = num_zones + 2;
1405 #endif
1406 z = first_zone;
1407 simple_unlock(&all_zones_lock);
1408
1409 if (max_zones <= *namesCntp) {
1410 /* use in-line memory */
1411
1412 names = *namesp;
1413 } else {
1414 names_size = round_page_32(max_zones * sizeof *names);
1415 kr = kmem_alloc_pageable(ipc_kernel_map,
1416 &names_addr, names_size);
1417 if (kr != KERN_SUCCESS)
1418 return kr;
1419 names = (zone_name_t *) names_addr;
1420 }
1421
1422 if (max_zones <= *infoCntp) {
1423 /* use in-line memory */
1424
1425 info = *infop;
1426 } else {
1427 info_size = round_page_32(max_zones * sizeof *info);
1428 kr = kmem_alloc_pageable(ipc_kernel_map,
1429 &info_addr, info_size);
1430 if (kr != KERN_SUCCESS) {
1431 if (names != *namesp)
1432 kmem_free(ipc_kernel_map,
1433 names_addr, names_size);
1434 return kr;
1435 }
1436
1437 info = (zone_info_t *) info_addr;
1438 }
1439 zn = &names[0];
1440 zi = &info[0];
1441
1442 for (i = 0; i < num_zones; i++) {
1443 struct zone zcopy;
1444
1445 assert(z != ZONE_NULL);
1446
1447 lock_zone(z);
1448 zcopy = *z;
1449 unlock_zone(z);
1450
1451 simple_lock(&all_zones_lock);
1452 z = z->next_zone;
1453 simple_unlock(&all_zones_lock);
1454
1455 /* assuming here the name data is static */
1456 (void) strncpy(zn->zn_name, zcopy.zone_name,
1457 sizeof zn->zn_name);
1458
1459 zi->zi_count = zcopy.count;
1460 zi->zi_cur_size = zcopy.cur_size;
1461 zi->zi_max_size = zcopy.max_size;
1462 zi->zi_elem_size = zcopy.elem_size;
1463 zi->zi_alloc_size = zcopy.alloc_size;
1464 zi->zi_exhaustible = zcopy.exhaustible;
1465 zi->zi_collectable = zcopy.collectable;
1466
1467 zn++;
1468 zi++;
1469 }
1470 strcpy(zn->zn_name, "kernel_stacks");
1471 stack_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
1472 &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
1473 zn++;
1474 zi++;
1475 #ifdef ppc
1476 strcpy(zn->zn_name, "save_areas");
1477 save_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
1478 &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
1479 zn++;
1480 zi++;
1481
1482 strcpy(zn->zn_name, "pmap_mappings");
1483 mapping_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
1484 &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
1485 zn++;
1486 zi++;
1487 #endif
1488 strcpy(zn->zn_name, "kalloc.large");
1489 kalloc_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
1490 &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
1491
1492 if (names != *namesp) {
1493 vm_size_t used;
1494 vm_map_copy_t copy;
1495
1496 used = max_zones * sizeof *names;
1497
1498 if (used != names_size)
1499 bzero((char *) (names_addr + used), names_size - used);
1500
1501 kr = vm_map_copyin(ipc_kernel_map, names_addr, names_size,
1502 TRUE, &copy);
1503 assert(kr == KERN_SUCCESS);
1504
1505 *namesp = (zone_name_t *) copy;
1506 }
1507 *namesCntp = max_zones;
1508
1509 if (info != *infop) {
1510 vm_size_t used;
1511 vm_map_copy_t copy;
1512
1513 used = max_zones * sizeof *info;
1514
1515 if (used != info_size)
1516 bzero((char *) (info_addr + used), info_size - used);
1517
1518 kr = vm_map_copyin(ipc_kernel_map, info_addr, info_size,
1519 TRUE, &copy);
1520 assert(kr == KERN_SUCCESS);
1521
1522 *infop = (zone_info_t *) copy;
1523 }
1524 *infoCntp = max_zones;
1525
1526 return KERN_SUCCESS;
1527 }
1528
1529 #if MACH_KDB
1530 #include <ddb/db_command.h>
1531 #include <ddb/db_output.h>
1532 #include <kern/kern_print.h>
1533
1534 const char *zone_labels =
1535 "ENTRY COUNT TOT_SZ MAX_SZ ELT_SZ ALLOC_SZ NAME";
1536
1537 /* Forwards */
1538 void db_print_zone(
1539 zone_t addr);
1540
1541 #if ZONE_DEBUG
1542 void db_zone_check_active(
1543 zone_t zone);
1544 void db_zone_print_active(
1545 zone_t zone);
1546 #endif /* ZONE_DEBUG */
1547 void db_zone_print_free(
1548 zone_t zone);
1549 void
1550 db_print_zone(
1551 zone_t addr)
1552 {
1553 struct zone zcopy;
1554
1555 zcopy = *addr;
1556
1557 db_printf("%8x %8x %8x %8x %6x %8x %s ",
1558 addr, zcopy.count, zcopy.cur_size,
1559 zcopy.max_size, zcopy.elem_size,
1560 zcopy.alloc_size, zcopy.zone_name);
1561 if (zcopy.exhaustible)
1562 db_printf("H");
1563 if (zcopy.collectable)
1564 db_printf("C");
1565 if (zcopy.expandable)
1566 db_printf("X");
1567 db_printf("\n");
1568 }
1569
1570 /*ARGSUSED*/
1571 void
1572 db_show_one_zone(
1573 db_expr_t addr,
1574 int have_addr,
1575 db_expr_t count,
1576 char * modif)
1577 {
1578 struct zone *z = (zone_t)addr;
1579
1580 if (z == ZONE_NULL || !have_addr){
1581 db_error("No Zone\n");
1582 /*NOTREACHED*/
1583 }
1584
1585 db_printf("%s\n", zone_labels);
1586 db_print_zone(z);
1587 }
1588
1589 /*ARGSUSED*/
1590 void
1591 db_show_all_zones(
1592 db_expr_t addr,
1593 int have_addr,
1594 db_expr_t count,
1595 char * modif)
1596 {
1597 zone_t z;
1598 unsigned total = 0;
1599
1600 /*
1601 * Don't risk hanging by unconditionally locking; the
1602 * risk of incoherent data is small (zones aren't freed).
1603 */
1604 have_addr = simple_lock_try(&all_zones_lock);
1605 count = num_zones;
1606 z = first_zone;
1607 if (have_addr) {
1608 simple_unlock(&all_zones_lock);
1609 }
1610
1611 db_printf("%s\n", zone_labels);
1612 for ( ; count > 0; count--) {
1613 if (!z) {
1614 db_error("Mangled Zone List\n");
1615 /*NOTREACHED*/
1616 }
1617 db_print_zone(z);
1618 total += z->cur_size,
1619
1620 have_addr = simple_lock_try(&all_zones_lock);
1621 z = z->next_zone;
1622 if (have_addr) {
1623 simple_unlock(&all_zones_lock);
1624 }
1625 }
1626 db_printf("\nTotal %8x", total);
1627 db_printf("\n\nzone_gc() has reclaimed %d pages\n", zgc_stats.pgs_freed);
1628 }
1629
1630 #if ZONE_DEBUG
1631 void
1632 db_zone_check_active(
1633 zone_t zone)
1634 {
1635 int count = 0;
1636 queue_t tmp_elem;
1637
1638 if (!zone_debug_enabled(zone) || !zone_check)
1639 return;
1640 tmp_elem = queue_first(&zone->active_zones);
1641 while (count < zone->count) {
1642 count++;
1643 if (tmp_elem == 0) {
1644 printf("unexpected zero element, zone=0x%x, count=%d\n",
1645 zone, count);
1646 assert(FALSE);
1647 break;
1648 }
1649 if (queue_end(tmp_elem, &zone->active_zones)) {
1650 printf("unexpected queue_end, zone=0x%x, count=%d\n",
1651 zone, count);
1652 assert(FALSE);
1653 break;
1654 }
1655 tmp_elem = queue_next(tmp_elem);
1656 }
1657 if (!queue_end(tmp_elem, &zone->active_zones)) {
1658 printf("not at queue_end, zone=0x%x, tmp_elem=0x%x\n",
1659 zone, tmp_elem);
1660 assert(FALSE);
1661 }
1662 }
1663
1664 void
1665 db_zone_print_active(
1666 zone_t zone)
1667 {
1668 int count = 0;
1669 queue_t tmp_elem;
1670
1671 if (!zone_debug_enabled(zone)) {
1672 printf("zone 0x%x debug not enabled\n", zone);
1673 return;
1674 }
1675 if (!zone_check) {
1676 printf("zone_check FALSE\n");
1677 return;
1678 }
1679
1680 printf("zone 0x%x, active elements %d\n", zone, zone->count);
1681 printf("active list:\n");
1682 tmp_elem = queue_first(&zone->active_zones);
1683 while (count < zone->count) {
1684 printf(" 0x%x", tmp_elem);
1685 count++;
1686 if ((count % 6) == 0)
1687 printf("\n");
1688 if (tmp_elem == 0) {
1689 printf("\nunexpected zero element, count=%d\n", count);
1690 break;
1691 }
1692 if (queue_end(tmp_elem, &zone->active_zones)) {
1693 printf("\nunexpected queue_end, count=%d\n", count);
1694 break;
1695 }
1696 tmp_elem = queue_next(tmp_elem);
1697 }
1698 if (!queue_end(tmp_elem, &zone->active_zones))
1699 printf("\nnot at queue_end, tmp_elem=0x%x\n", tmp_elem);
1700 else
1701 printf("\n");
1702 }
1703 #endif /* ZONE_DEBUG */
1704
1705 void
1706 db_zone_print_free(
1707 zone_t zone)
1708 {
1709 int count = 0;
1710 int freecount;
1711 vm_offset_t elem;
1712
1713 freecount = zone_free_count(zone);
1714 printf("zone 0x%x, free elements %d\n", zone, freecount);
1715 printf("free list:\n");
1716 elem = zone->free_elements;
1717 while (count < freecount) {
1718 printf(" 0x%x", elem);
1719 count++;
1720 if ((count % 6) == 0)
1721 printf("\n");
1722 if (elem == 0) {
1723 printf("\nunexpected zero element, count=%d\n", count);
1724 break;
1725 }
1726 elem = *((vm_offset_t *)elem);
1727 }
1728 if (elem != 0)
1729 printf("\nnot at end of free list, elem=0x%x\n", elem);
1730 else
1731 printf("\n");
1732 }
1733
1734 #endif /* MACH_KDB */
1735
1736
1737 #if ZONE_DEBUG
1738
1739 /* should we care about locks here ? */
1740
1741 #if MACH_KDB
1742 vm_offset_t
1743 next_element(
1744 zone_t z,
1745 vm_offset_t elt)
1746 {
1747 if (!zone_debug_enabled(z))
1748 return(0);
1749 elt -= ZONE_DEBUG_OFFSET;
1750 elt = (vm_offset_t) queue_next((queue_t) elt);
1751 if ((queue_t) elt == &z->active_zones)
1752 return(0);
1753 elt += ZONE_DEBUG_OFFSET;
1754 return(elt);
1755 }
1756
1757 vm_offset_t
1758 first_element(
1759 zone_t z)
1760 {
1761 vm_offset_t elt;
1762
1763 if (!zone_debug_enabled(z))
1764 return(0);
1765 if (queue_empty(&z->active_zones))
1766 return(0);
1767 elt = (vm_offset_t) queue_first(&z->active_zones);
1768 elt += ZONE_DEBUG_OFFSET;
1769 return(elt);
1770 }
1771
1772 /*
1773 * Second arg controls how many zone elements are printed:
1774 * 0 => none
1775 * n, n < 0 => all
1776 * n, n > 0 => last n on active list
1777 */
1778 int
1779 zone_count(
1780 zone_t z,
1781 int tail)
1782 {
1783 vm_offset_t elt;
1784 int count = 0;
1785 boolean_t print = (tail != 0);
1786
1787 if (tail < 0)
1788 tail = z->count;
1789 if (z->count < tail)
1790 tail = 0;
1791 tail = z->count - tail;
1792 for (elt = first_element(z); elt; elt = next_element(z, elt)) {
1793 if (print && tail <= count)
1794 db_printf("%8x\n", elt);
1795 count++;
1796 }
1797 assert(count == z->count);
1798 return(count);
1799 }
1800 #endif /* MACH_KDB */
1801
1802 #define zone_in_use(z) ( z->count || z->free_elements )
1803
1804 void
1805 zone_debug_enable(
1806 zone_t z)
1807 {
1808 if (zone_debug_enabled(z) || zone_in_use(z) ||
1809 z->alloc_size < (z->elem_size + ZONE_DEBUG_OFFSET))
1810 return;
1811 queue_init(&z->active_zones);
1812 z->elem_size += ZONE_DEBUG_OFFSET;
1813 }
1814
1815 void
1816 zone_debug_disable(
1817 zone_t z)
1818 {
1819 if (!zone_debug_enabled(z) || zone_in_use(z))
1820 return;
1821 z->elem_size -= ZONE_DEBUG_OFFSET;
1822 z->active_zones.next = z->active_zones.prev = 0;
1823 }
1824 #endif /* ZONE_DEBUG */