1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: kern/zalloc.c
54 * Author: Avadis Tevanian, Jr.
55 *
56 * Zone-based memory allocator. A zone is a collection of fixed size
57 * data blocks for which quick allocation/deallocation is possible.
58 */
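/*
 * Illustrative usage (a sketch, not part of the original source):  a
 * typical client creates a zone once at initialization time and then
 * draws elements from it with zalloc()/zfree().  The structure name,
 * the sizes and the zone name below are hypothetical.  zinit takes the
 * element size, the maximum memory, the allocation chunk size and a
 * name (see its definition below).
 *
 *	static zone_t	example_zone;
 *
 *	void
 *	example_init(void)
 *	{
 *		example_zone = zinit(sizeof(struct example),
 *				1024 * sizeof(struct example),
 *				PAGE_SIZE, "example structures");
 *	}
 *
 *	struct example *
 *	example_alloc(void)
 *	{
 *		return (struct example *) zalloc(example_zone);
 *	}
 *
 *	void
 *	example_free(struct example *e)
 *	{
 *		zfree(example_zone, (vm_offset_t) e);
 *	}
 */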
59 #include <zone_debug.h>
60 #include <norma_vm.h>
61 #include <mach_kdb.h>
62 #include <kern/ast.h>
63 #include <kern/assert.h>
64 #include <kern/macro_help.h>
65 #include <kern/sched.h>
66 #include <kern/lock.h>
67 #include <kern/sched_prim.h>
68 #include <kern/misc_protos.h>
69 #include <kern/thread_call.h>
70 #include <kern/zalloc.h>
71 #include <mach/vm_param.h>
72 #include <vm/vm_kern.h>
73 #include <machine/machparam.h>
74
75
76 #if MACH_ASSERT
77 /* Detect use of zone elt after freeing it by two methods:
78 * (1) Range-check the free-list "next" ptr for sanity.
79 * (2) Store the ptr in two different words, and compare them against
80  *     each other when re-using the zone elt, to detect modifications.
81 */
82
83 #if defined(__alpha)
84
85 #define is_kernel_data_addr(a) \
86 (!(a) || IS_SYS_VA(a) && !((a) & (sizeof(long)-1)))
87
88 #else /* !defined(__alpha) */
89
90 #define is_kernel_data_addr(a) \
91 (!(a) || (a) >= VM_MIN_KERNEL_ADDRESS && !((a) & 0x3))
92
93 #endif /* defined(__alpha) */
94
95 /* Should we set all words of the zone element to an illegal address
96 * when it is freed, to help catch usage after freeing? The down-side
97 * is that this obscures the identity of the freed element.
98 */
99 boolean_t zfree_clear = FALSE;
100
101 #define ADD_TO_ZONE(zone, element) \
102 MACRO_BEGIN \
103 if (zfree_clear) \
104 { int i; \
105 for (i=1; \
106 i < zone->elem_size/sizeof(vm_offset_t) - 1; \
107 i++) \
108 ((vm_offset_t *)(element))[i] = 0xdeadbeef; \
109 } \
110 ((vm_offset_t *)(element))[0] = (zone)->free_elements; \
111 (zone)->free_elements = (vm_offset_t) (element); \
112 (zone)->count--; \
113 MACRO_END
114
115 #define REMOVE_FROM_ZONE(zone, ret, type) \
116 MACRO_BEGIN \
117 (ret) = (type) (zone)->free_elements; \
118 if ((ret) != (type) 0) { \
119 if (!is_kernel_data_addr(((vm_offset_t *)(ret))[0])) { \
120 panic("A freed zone element has been modified.\n"); \
121 } \
122 (zone)->count++; \
123 (zone)->free_elements = *((vm_offset_t *)(ret)); \
124 } \
125 MACRO_END
126 #else /* MACH_ASSERT */
127
128 #define ADD_TO_ZONE(zone, element) \
129 MACRO_BEGIN \
130 *((vm_offset_t *)(element)) = (zone)->free_elements; \
131 (zone)->free_elements = (vm_offset_t) (element); \
132 (zone)->count--; \
133 MACRO_END
134
135 #define REMOVE_FROM_ZONE(zone, ret, type) \
136 MACRO_BEGIN \
137 (ret) = (type) (zone)->free_elements; \
138 if ((ret) != (type) 0) { \
139 (zone)->count++; \
140 (zone)->free_elements = *((vm_offset_t *)(ret)); \
141 } \
142 MACRO_END
143
144 #endif /* MACH_ASSERT */
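
/*
 * Free-list layout, as implied by the macros above:  the first word of
 * every free element is overlaid with the vm_offset_t of the next free
 * element, and zone->free_elements holds the head of the resulting
 * singly-linked list, so pushing and popping an element is O(1):
 *
 *	free_elements --> [ next | ... ] --> [ next | ... ] --> 0
 *
 * ADD_TO_ZONE pushes at the head; REMOVE_FROM_ZONE pops the head and,
 * under MACH_ASSERT, range-checks the stored "next" pointer first.
 */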
145
146 #if ZONE_DEBUG
147 #define zone_debug_enabled(z) z->active_zones.next
148 #define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y))
149 #define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16)
150 #endif /* ZONE_DEBUG */
151
152 /*
153 * Support for garbage collection of unused zone pages:
154 */
155
156 struct zone_page_table_entry {
157 struct zone_page_table_entry *link;
158 short alloc_count;
159 short collect_count;
160 };
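
/*
 * Each entry describes one page of the zone_map.  The entry for an
 * address is located with the index computation used throughout this
 * file (see zone_page_alloc() and friends):
 *
 *	i = atop_32(addr - zone_map_min_address);
 *
 * alloc_count tracks how many zone elements live on the page, and
 * collect_count how many of those were found free during zone_gc().
 */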
161
162 /* Forwards */
163 void zone_page_init(
164 vm_offset_t addr,
165 vm_size_t size,
166 int value);
167
168 void zone_page_alloc(
169 vm_offset_t addr,
170 vm_size_t size);
171
172 void zone_page_free_element(
173 struct zone_page_table_entry **free_pages,
174 vm_offset_t addr,
175 vm_size_t size);
176
177 void zone_page_collect(
178 vm_offset_t addr,
179 vm_size_t size);
180
181 boolean_t zone_page_collectable(
182 vm_offset_t addr,
183 vm_size_t size);
184
185 void zone_page_keep(
186 vm_offset_t addr,
187 vm_size_t size);
188
189 void zalloc_async(
190 thread_call_param_t p0,
191 thread_call_param_t p1);
192
193
194 #if ZONE_DEBUG && MACH_KDB
195 int zone_count(
196 zone_t z,
197 int tail);
198 #endif /* ZONE_DEBUG && MACH_KDB */
199
200 vm_map_t zone_map = VM_MAP_NULL;
201
202 zone_t zone_zone = ZONE_NULL; /* the zone containing other zones */
203
204 /*
205 * The VM system gives us an initial chunk of memory.
206 * It has to be big enough to allocate the zone_zone
207 */
208
209 vm_offset_t zdata;
210 vm_size_t zdata_size;
211
212 #define lock_zone(zone) \
213 MACRO_BEGIN \
214 simple_lock(&(zone)->lock); \
215 MACRO_END
216
217 #define unlock_zone(zone) \
218 MACRO_BEGIN \
219 simple_unlock(&(zone)->lock); \
220 MACRO_END
221
222 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
223 #define zone_sleep(zone) \
224 thread_sleep_simple_lock((event_t)(zone), \
225 &(zone)->lock, \
226 THREAD_UNINT)
227
228 #define lock_zone_init(zone) \
229 MACRO_BEGIN \
230 simple_lock_init(&zone->lock, ETAP_MISC_ZONE); \
231 MACRO_END
232
233 #define lock_try_zone(zone) simple_lock_try(&zone->lock)
234
235 kern_return_t zget_space(
236 vm_offset_t size,
237 vm_offset_t *result);
238
239 decl_simple_lock_data(,zget_space_lock)
240 vm_offset_t zalloc_next_space;
241 vm_offset_t zalloc_end_of_space;
242 vm_size_t zalloc_wasted_space;
243
244 /*
245 * Garbage collection map information
246 */
247 struct zone_page_table_entry * zone_page_table;
248 vm_offset_t zone_map_min_address;
249 vm_offset_t zone_map_max_address;
250 integer_t zone_pages;
251
252 /*
253 * Exclude more than one concurrent garbage collection
254 */
255 decl_mutex_data(, zone_gc_lock)
256
257 #define from_zone_map(addr, size) \
258 ((vm_offset_t)(addr) >= zone_map_min_address && \
259 ((vm_offset_t)(addr) + size -1) < zone_map_max_address)
260
261 #define ZONE_PAGE_USED 0
262 #define ZONE_PAGE_UNUSED -1
263
264
265 /*
266 * Protects first_zone, last_zone, num_zones,
267 * and the next_zone field of zones.
268 */
269 decl_simple_lock_data(, all_zones_lock)
270 zone_t first_zone;
271 zone_t *last_zone;
272 int num_zones;
273
274 boolean_t zone_gc_allowed = TRUE;
275 boolean_t zone_gc_forced = FALSE;
276 unsigned zone_gc_last_tick = 0;
277 unsigned zone_gc_max_rate = 0; /* in ticks */
278
279
280 /*
281 * zinit initializes a new zone. The zone data structures themselves
282 * are stored in a zone, which is initially a static structure that
283 * is initialized by zone_init.
284 */
285 zone_t
286 zinit(
287 vm_size_t size, /* the size of an element */
288 vm_size_t max, /* maximum memory to use */
289 vm_size_t alloc, /* allocation size */
290 char *name) /* a name for the zone */
291 {
292 zone_t z;
293
294 if (zone_zone == ZONE_NULL) {
295 if (zget_space(sizeof(struct zone), (vm_offset_t *)&z)
296 != KERN_SUCCESS)
297 return(ZONE_NULL);
298 } else
299 z = (zone_t) zalloc(zone_zone);
300 if (z == ZONE_NULL)
301 return(ZONE_NULL);
302
303 /*
304 * Round off all the parameters appropriately.
305 */
306 if (size < sizeof(z->free_elements))
307 size = sizeof(z->free_elements);
308 size = ((size-1) + sizeof(z->free_elements)) -
309 ((size-1) % sizeof(z->free_elements));
310 if (alloc == 0)
311 alloc = PAGE_SIZE;
312 alloc = round_page_32(alloc);
313 max = round_page_32(max);
314 /*
315 * We look for an allocation size with least fragmentation
316 * in the range of 1 - 5 pages. This size will be used unless
317 * the user suggestion is larger AND has less fragmentation
318 */
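	/*
	 * Worked example (assuming a 4K page size, which is an assumption
	 * of this note, not something the code requires):  for a 72-byte
	 * element the per-chunk waste is
	 *
	 *	1 page:  4096 % 72 = 64		4 pages: 16384 % 72 = 40
	 *	2 pages: 8192 % 72 = 56		5 pages: 20480 % 72 = 32
	 *	3 pages: 12288 % 72 = 48
	 *
	 * so "best" ends up as 5 pages; the caller's alloc is kept only if
	 * it is larger than "best" and wastes less per chunk.
	 */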
319 { vm_size_t best, waste; unsigned int i;
320 best = PAGE_SIZE;
321 waste = best % size;
322 for (i = 2; i <= 5; i++){ vm_size_t tsize, twaste;
323 tsize = i * PAGE_SIZE;
324 twaste = tsize % size;
325 if (twaste < waste)
326 best = tsize, waste = twaste;
327 }
328 if (alloc <= best || (alloc % size >= waste))
329 alloc = best;
330 }
331 if (max && (max < alloc))
332 max = alloc;
333
334 z->free_elements = 0;
335 z->cur_size = 0;
336 z->max_size = max;
337 z->elem_size = size;
338 z->alloc_size = alloc;
339 z->zone_name = name;
340 z->count = 0;
341 z->doing_alloc = FALSE;
342 z->exhaustible = FALSE;
343 z->collectable = TRUE;
344 z->allows_foreign = FALSE;
345 z->expandable = TRUE;
346 z->waiting = FALSE;
347 z->async_pending = FALSE;
348
349 #if ZONE_DEBUG
350 z->active_zones.next = z->active_zones.prev = 0;
351 zone_debug_enable(z);
352 #endif /* ZONE_DEBUG */
353 lock_zone_init(z);
354
355 /*
356 * Add the zone to the all-zones list.
357 */
358
359 z->next_zone = ZONE_NULL;
360 thread_call_setup(&z->call_async_alloc, zalloc_async, z);
361 simple_lock(&all_zones_lock);
362 *last_zone = z;
363 last_zone = &z->next_zone;
364 num_zones++;
365 simple_unlock(&all_zones_lock);
366
367 return(z);
368 }
369
370 /*
371 * Cram the given memory into the specified zone.
372 */
373 void
374 zcram(
375 register zone_t zone,
376 vm_offset_t newmem,
377 vm_size_t size)
378 {
379 register vm_size_t elem_size;
380
381 /* Basic sanity checks */
382 assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
383 assert(!zone->collectable || zone->allows_foreign
384 || (from_zone_map(newmem, size)));
385
386 elem_size = zone->elem_size;
387
388 lock_zone(zone);
389 while (size >= elem_size) {
390 ADD_TO_ZONE(zone, newmem);
391 if (from_zone_map(newmem, elem_size))
392 zone_page_alloc(newmem, elem_size);
393 zone->count++; /* compensate for ADD_TO_ZONE */
394 size -= elem_size;
395 newmem += elem_size;
396 zone->cur_size += elem_size;
397 }
398 unlock_zone(zone);
399 }
400
401 /*
402 * Contiguous space allocator for non-paged zones. Allocates "size" amount
403 * of memory from zone_map.
404 */
405
406 kern_return_t
407 zget_space(
408 vm_offset_t size,
409 vm_offset_t *result)
410 {
411 vm_offset_t new_space = 0;
412 vm_size_t space_to_add;
413
414 simple_lock(&zget_space_lock);
415 while ((zalloc_next_space + size) > zalloc_end_of_space) {
416 /*
417 * Add at least one page to allocation area.
418 */
419
420 space_to_add = round_page_32(size);
421
422 if (new_space == 0) {
423 kern_return_t retval;
424 /*
425 * Memory cannot be wired down while holding
426 * any locks that the pageout daemon might
427 * need to free up pages. [Making the zget_space
428 * lock a complex lock does not help in this
429 * regard.]
430 *
431 * Unlock and allocate memory. Because several
432 * threads might try to do this at once, don't
433 * use the memory before checking for available
434 * space again.
435 */
436
437 simple_unlock(&zget_space_lock);
438
439 retval = kernel_memory_allocate(zone_map, &new_space,
440 space_to_add, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);
441 if (retval != KERN_SUCCESS)
442 return(retval);
443 zone_page_init(new_space, space_to_add,
444 ZONE_PAGE_USED);
445 simple_lock(&zget_space_lock);
446 continue;
447 }
448
449
450 /*
451 * Memory was allocated in a previous iteration.
452 *
453 * Check whether the new region is contiguous
454 * with the old one.
455 */
456
457 if (new_space != zalloc_end_of_space) {
458 /*
459 * Throw away the remainder of the
460 * old space, and start a new one.
461 */
462 zalloc_wasted_space +=
463 zalloc_end_of_space - zalloc_next_space;
464 zalloc_next_space = new_space;
465 }
466
467 zalloc_end_of_space = new_space + space_to_add;
468
469 new_space = 0;
470 }
471 *result = zalloc_next_space;
472 zalloc_next_space += size;
473 simple_unlock(&zget_space_lock);
474
475 if (new_space != 0)
476 kmem_free(zone_map, new_space, space_to_add);
477
478 return(KERN_SUCCESS);
479 }
480
481
482 /*
483 * Steal memory for the zone package. Called from
484 * vm_page_bootstrap().
485 */
486 void
487 zone_steal_memory(void)
488 {
489 zdata_size = round_page_32(128*sizeof(struct zone));
490 zdata = pmap_steal_memory(zdata_size);
491 }
492
493
494 /*
495 * Fill a zone with enough memory to contain at least nelem elements.
496 * Memory is obtained with kmem_alloc_wired from the kernel_map.
497 * Return the number of elements actually put into the zone, which may
498 * be more than the caller asked for since the memory allocation is
499 * rounded up to a full page.
500 */
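/*
 * For example (assuming a 4K page size):  asking for 100 elements of a
 * 72-byte zone gives size = 7200, rounded up to 8192, so 8192 / 72 = 113
 * elements are crammed into the zone and 113 is returned.
 */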
501 int
502 zfill(
503 zone_t zone,
504 int nelem)
505 {
506 kern_return_t kr;
507 vm_size_t size;
508 vm_offset_t memory;
509 int nalloc;
510
511 assert(nelem > 0);
512 if (nelem <= 0)
513 return 0;
514 size = nelem * zone->elem_size;
515 size = round_page_32(size);
516 kr = kmem_alloc_wired(kernel_map, &memory, size);
517 if (kr != KERN_SUCCESS)
518 return 0;
519
520 zone_change(zone, Z_FOREIGN, TRUE);
521 zcram(zone, memory, size);
522 nalloc = size / zone->elem_size;
523 assert(nalloc >= nelem);
524
525 return nalloc;
526 }
527
528 /*
529 * Initialize the "zone of zones" which uses fixed memory allocated
530 * earlier in memory initialization. zone_bootstrap is called
531 * before zone_init.
532 */
533 void
534 zone_bootstrap(void)
535 {
536 vm_size_t zone_zone_size;
537 vm_offset_t zone_zone_space;
538
539 simple_lock_init(&all_zones_lock, ETAP_MISC_ZONE_ALL);
540
541 first_zone = ZONE_NULL;
542 last_zone = &first_zone;
543 num_zones = 0;
544
545 simple_lock_init(&zget_space_lock, ETAP_MISC_ZONE_GET);
546 zalloc_next_space = zdata;
547 zalloc_end_of_space = zdata + zdata_size;
548 zalloc_wasted_space = 0;
549
550 /* assertion: nobody else called zinit before us */
551 assert(zone_zone == ZONE_NULL);
552 zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
553 sizeof(struct zone), "zones");
554 zone_change(zone_zone, Z_COLLECT, FALSE);
555 zone_zone_size = zalloc_end_of_space - zalloc_next_space;
556 zget_space(zone_zone_size, &zone_zone_space);
557 zcram(zone_zone, zone_zone_space, zone_zone_size);
558 }
559
560 void
561 zone_init(
562 vm_size_t max_zonemap_size)
563 {
564 kern_return_t retval;
565 vm_offset_t zone_min;
566 vm_offset_t zone_max;
567 vm_size_t zone_table_size;
568
569 retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
570 FALSE, TRUE, &zone_map);
571 if (retval != KERN_SUCCESS)
572 panic("zone_init: kmem_suballoc failed");
573 zone_max = zone_min + round_page_32(max_zonemap_size);
574 /*
575 * Setup garbage collection information:
576 */
577 zone_table_size = atop_32(zone_max - zone_min) *
578 sizeof(struct zone_page_table_entry);
579 if (kmem_alloc_wired(zone_map, (vm_offset_t *) &zone_page_table,
580 zone_table_size) != KERN_SUCCESS)
581 panic("zone_init");
582 zone_min = (vm_offset_t)zone_page_table + round_page_32(zone_table_size);
583 zone_pages = atop_32(zone_max - zone_min);
584 zone_map_min_address = zone_min;
585 zone_map_max_address = zone_max;
586 mutex_init(&zone_gc_lock, ETAP_NO_TRACE);
587 zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
588 }
589
590
591 /*
592 * zalloc returns an element from the specified zone.
593 */
594 vm_offset_t
595 zalloc_canblock(
596 register zone_t zone,
597 boolean_t canblock)
598 {
599 vm_offset_t addr;
600 kern_return_t retval;
601
602 assert(zone != ZONE_NULL);
603 check_simple_locks();
604
605 lock_zone(zone);
606
607 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
608
609 while ((addr == 0) && canblock) {
610 /*
611 * If nothing was there, try to get more
612 */
613 if (zone->doing_alloc) {
614 /*
615 * Someone is allocating memory for this zone.
616 * Wait for it to show up, then try again.
617 */
618 zone->waiting = TRUE;
619 zone_sleep(zone);
620 }
621 else {
622 if ((zone->cur_size + zone->elem_size) >
623 zone->max_size) {
624 if (zone->exhaustible)
625 break;
626 if (zone->expandable) {
627 /*
628 * We're willing to overflow certain
629 * zones, but not without complaining.
630 *
631 * This is best used in conjunction
632 * with the collectable flag. What we
633 * want is an assurance we can get the
634 * memory back, assuming there's no
635 * leak.
636 */
637 zone->max_size += (zone->max_size >> 1);
638 } else {
639 unlock_zone(zone);
640
641 panic("zalloc: zone \"%s\" empty.", zone->zone_name);
642 }
643 }
644 zone->doing_alloc = TRUE;
645 unlock_zone(zone);
646
647 if (zone->collectable) {
648 vm_offset_t space;
649 vm_size_t alloc_size;
650 boolean_t retry = FALSE;
651
652 for (;;) {
653
654 if (vm_pool_low() || retry == TRUE)
655 alloc_size =
656 round_page_32(zone->elem_size);
657 else
658 alloc_size = zone->alloc_size;
659
660 retval = kernel_memory_allocate(zone_map,
661 &space, alloc_size, 0,
662 KMA_KOBJECT|KMA_NOPAGEWAIT);
663 if (retval == KERN_SUCCESS) {
664 zone_page_init(space, alloc_size,
665 ZONE_PAGE_USED);
666 zcram(zone, space, alloc_size);
667
668 break;
669 } else if (retval != KERN_RESOURCE_SHORTAGE) {
670 /* would like to cause a zone_gc() */
671 if (retry == TRUE)
672 panic("zalloc");
673 retry = TRUE;
674 } else {
675 break;
676 }
677 }
678 lock_zone(zone);
679 zone->doing_alloc = FALSE;
680 if (zone->waiting) {
681 zone->waiting = FALSE;
682 zone_wakeup(zone);
683 }
684 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
685 if (addr == 0 &&
686 retval == KERN_RESOURCE_SHORTAGE) {
687 unlock_zone(zone);
688
689 VM_PAGE_WAIT();
690 lock_zone(zone);
691 }
692 } else {
693 vm_offset_t space;
694 retval = zget_space(zone->elem_size, &space);
695
696 lock_zone(zone);
697 zone->doing_alloc = FALSE;
698 if (zone->waiting) {
699 zone->waiting = FALSE;
700 thread_wakeup((event_t)zone);
701 }
702 if (retval == KERN_SUCCESS) {
703 zone->count++;
704 zone->cur_size += zone->elem_size;
705 #if ZONE_DEBUG
706 if (zone_debug_enabled(zone)) {
707 enqueue_tail(&zone->active_zones, (queue_entry_t)space);
708 }
709 #endif
710 unlock_zone(zone);
711 zone_page_alloc(space, zone->elem_size);
712 #if ZONE_DEBUG
713 if (zone_debug_enabled(zone))
714 space += ZONE_DEBUG_OFFSET;
715 #endif
716 return(space);
717 }
718 if (retval == KERN_RESOURCE_SHORTAGE) {
719 unlock_zone(zone);
720
721 VM_PAGE_WAIT();
722 lock_zone(zone);
723 } else {
724 panic("zalloc");
725 }
726 }
727 }
728 if (addr == 0)
729 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
730 }
731
732 if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (!vm_pool_low())) {
733 zone->async_pending = TRUE;
734 unlock_zone(zone);
735 thread_call_enter(&zone->call_async_alloc);
736 lock_zone(zone);
737 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
738 }
739
740 #if ZONE_DEBUG
741 if (addr && zone_debug_enabled(zone)) {
742 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
743 addr += ZONE_DEBUG_OFFSET;
744 }
745 #endif
746
747 unlock_zone(zone);
748
749 return(addr);
750 }
751
752
753 vm_offset_t
754 zalloc(
755 register zone_t zone)
756 {
757 return( zalloc_canblock(zone, TRUE) );
758 }
759
760 vm_offset_t
761 zalloc_noblock(
762 register zone_t zone)
763 {
764 return( zalloc_canblock(zone, FALSE) );
765 }
766
767 void
768 zalloc_async(
769 thread_call_param_t p0,
770 thread_call_param_t p1)
771 {
772 vm_offset_t elt;
773
774 elt = zalloc_canblock((zone_t)p0, TRUE);
775 zfree((zone_t)p0, elt);
776 lock_zone(((zone_t)p0));
777 ((zone_t)p0)->async_pending = FALSE;
778 unlock_zone(((zone_t)p0));
779 }
780
781
782 /*
783  * zget returns an element from the specified zone, or 0 immediately
784  * if no element is available.
785  *
786  * This form should be used when the caller cannot block (for
787  * example, when processing an interrupt).
788 */
789 vm_offset_t
790 zget(
791 register zone_t zone)
792 {
793 register vm_offset_t addr;
794
795 assert( zone != ZONE_NULL );
796
797 if (!lock_try_zone(zone))
798 return ((vm_offset_t)0);
799
800 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
801 #if ZONE_DEBUG
802 if (addr && zone_debug_enabled(zone)) {
803 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
804 addr += ZONE_DEBUG_OFFSET;
805 }
806 #endif /* ZONE_DEBUG */
807 unlock_zone(zone);
808
809 return(addr);
810 }
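
/*
 * Illustrative use of zget (a sketch, not from the original source):  a
 * caller that cannot block tries the zone and backs off when it is
 * empty.  The zone and structure below are hypothetical.
 *
 *	struct example *e;
 *
 *	e = (struct example *) zget(example_zone);
 *	if (e == (struct example *) 0) {
 *		(the zone is empty; defer or drop the work, do not block)
 *		return;
 *	}
 */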
811
812 /* Keep this FALSE by default.  Large memory machines run orders of magnitude
813    slower in debug mode when TRUE.  Use the debugger to enable it if needed. */
814 /* static */ boolean_t zone_check = FALSE;
815
816 static zone_t zone_last_bogus_zone = ZONE_NULL;
817 static vm_offset_t zone_last_bogus_elem = 0;
818
819 void
820 zfree(
821 register zone_t zone,
822 vm_offset_t elem)
823 {
824
825 #if MACH_ASSERT
826 /* Basic sanity checks */
827 if (zone == ZONE_NULL || elem == (vm_offset_t)0)
828 panic("zfree: NULL");
829 /* zone_gc assumes zones are never freed */
830 if (zone == zone_zone)
831 panic("zfree: freeing to zone_zone breaks zone_gc!");
832 #endif
833
834 if (zone->collectable && !zone->allows_foreign &&
835 !from_zone_map(elem, zone->elem_size)) {
836 #if MACH_ASSERT
837 panic("zfree: non-allocated memory in collectable zone!");
838 #else
839 zone_last_bogus_zone = zone;
840 zone_last_bogus_elem = elem;
841 return;
842 #endif
843 }
844
845 lock_zone(zone);
846 #if ZONE_DEBUG
847 if (zone_debug_enabled(zone)) {
848 queue_t tmp_elem;
849
850 elem -= ZONE_DEBUG_OFFSET;
851 if (zone_check) {
852 /* check the zone's consistency */
853
854 for (tmp_elem = queue_first(&zone->active_zones);
855 !queue_end(tmp_elem, &zone->active_zones);
856 tmp_elem = queue_next(tmp_elem))
857 if (elem == (vm_offset_t)tmp_elem)
858 break;
859 if (elem != (vm_offset_t)tmp_elem)
860 panic("zfree()ing element from wrong zone");
861 }
862 remqueue(&zone->active_zones, (queue_t) elem);
863 }
864 #endif /* ZONE_DEBUG */
865 if (zone_check) {
866 vm_offset_t this;
867
868 /* check the zone's consistency */
869
870 for (this = zone->free_elements;
871 this != 0;
872 this = * (vm_offset_t *) this)
873 if (!pmap_kernel_va(this) || this == elem)
874 panic("zfree");
875 }
876 ADD_TO_ZONE(zone, elem);
877
878 /*
879 * If elements have one or more pages, and memory is low,
880 * request to run the garbage collection in the zone the next
881 * time the pageout thread runs.
882 */
883 if (zone->elem_size >= PAGE_SIZE &&
884 vm_pool_low()){
885 zone_gc_forced = TRUE;
886 }
887 unlock_zone(zone);
888 }
889
890
891 /* Change a zone's flags.
892 * This routine must be called immediately after zinit.
893 */
894 void
895 zone_change(
896 zone_t zone,
897 unsigned int item,
898 boolean_t value)
899 {
900 assert( zone != ZONE_NULL );
901 assert( value == TRUE || value == FALSE );
902
903 switch(item){
904 case Z_EXHAUST:
905 zone->exhaustible = value;
906 break;
907 case Z_COLLECT:
908 zone->collectable = value;
909 break;
910 case Z_EXPAND:
911 zone->expandable = value;
912 break;
913 case Z_FOREIGN:
914 zone->allows_foreign = value;
915 break;
916 #if MACH_ASSERT
917 default:
918 panic("Zone_change: Wrong Item Type!");
919 /* break; */
920 #endif
921 }
922 lock_zone_init(zone);
923 }
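
/*
 * Illustrative use (a sketch, not from the original source):  flags are
 * adjusted right after zinit, as zone_bootstrap() and zfill() do
 * elsewhere in this file.  The zone below is hypothetical.
 *
 *	example_zone = zinit(sizeof(struct example),
 *			1024 * sizeof(struct example),
 *			PAGE_SIZE, "example structures");
 *	zone_change(example_zone, Z_EXHAUST, TRUE);	(return 0 when full)
 *	zone_change(example_zone, Z_COLLECT, FALSE);	(exempt from zone_gc)
 */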
924
925 /*
926 * Return the expected number of free elements in the zone.
927 * This calculation will be incorrect if items are zfree'd that
928 * were never zalloc'd/zget'd. The correct way to stuff memory
929 * into a zone is by zcram.
930 */
931
932 integer_t
933 zone_free_count(zone_t zone)
934 {
935 integer_t free_count;
936
937 lock_zone(zone);
938 free_count = zone->cur_size/zone->elem_size - zone->count;
939 unlock_zone(zone);
940
941 assert(free_count >= 0);
942
943 return(free_count);
944 }
945
946 /*
947  *	zprealloc preallocates wired memory, expanding the specified
948  *	zone to the specified size.
949 */
950 void
951 zprealloc(
952 zone_t zone,
953 vm_size_t size)
954 {
955 vm_offset_t addr;
956
957 if (size != 0) {
958 if (kmem_alloc_wired(zone_map, &addr, size) != KERN_SUCCESS)
959 panic("zprealloc");
960 zone_page_init(addr, size, ZONE_PAGE_USED);
961 zcram(zone, addr, size);
962 }
963 }
964
965 /*
966 * Zone garbage collection subroutines
967 */
968
969 boolean_t
970 zone_page_collectable(
971 vm_offset_t addr,
972 vm_size_t size)
973 {
974 struct zone_page_table_entry *zp;
975 natural_t i, j;
976
977 #if MACH_ASSERT
978 if (!from_zone_map(addr, size))
979 panic("zone_page_collectable");
980 #endif
981
982 i = atop_32(addr-zone_map_min_address);
983 j = atop_32((addr+size-1) - zone_map_min_address);
984
985 for (zp = zone_page_table + i; i <= j; zp++, i++)
986 if (zp->collect_count == zp->alloc_count)
987 return (TRUE);
988
989 return (FALSE);
990 }
991
992 void
993 zone_page_keep(
994 vm_offset_t addr,
995 vm_size_t size)
996 {
997 struct zone_page_table_entry *zp;
998 natural_t i, j;
999
1000 #if MACH_ASSERT
1001 if (!from_zone_map(addr, size))
1002 panic("zone_page_keep");
1003 #endif
1004
1005 i = atop_32(addr-zone_map_min_address);
1006 j = atop_32((addr+size-1) - zone_map_min_address);
1007
1008 for (zp = zone_page_table + i; i <= j; zp++, i++)
1009 zp->collect_count = 0;
1010 }
1011
1012 void
1013 zone_page_collect(
1014 vm_offset_t addr,
1015 vm_size_t size)
1016 {
1017 struct zone_page_table_entry *zp;
1018 natural_t i, j;
1019
1020 #if MACH_ASSERT
1021 if (!from_zone_map(addr, size))
1022 panic("zone_page_collect");
1023 #endif
1024
1025 i = atop_32(addr-zone_map_min_address);
1026 j = atop_32((addr+size-1) - zone_map_min_address);
1027
1028 for (zp = zone_page_table + i; i <= j; zp++, i++)
1029 ++zp->collect_count;
1030 }
1031
1032 void
1033 zone_page_init(
1034 vm_offset_t addr,
1035 vm_size_t size,
1036 int value)
1037 {
1038 struct zone_page_table_entry *zp;
1039 natural_t i, j;
1040
1041 #if MACH_ASSERT
1042 if (!from_zone_map(addr, size))
1043 panic("zone_page_init");
1044 #endif
1045
1046 i = atop_32(addr-zone_map_min_address);
1047 j = atop_32((addr+size-1) - zone_map_min_address);
1048
1049 for (zp = zone_page_table + i; i <= j; zp++, i++) {
1050 zp->alloc_count = value;
1051 zp->collect_count = 0;
1052 }
1053 }
1054
1055 void
1056 zone_page_alloc(
1057 vm_offset_t addr,
1058 vm_size_t size)
1059 {
1060 struct zone_page_table_entry *zp;
1061 natural_t i, j;
1062
1063 #if MACH_ASSERT
1064 if (!from_zone_map(addr, size))
1065 panic("zone_page_alloc");
1066 #endif
1067
1068 i = atop_32(addr-zone_map_min_address);
1069 j = atop_32((addr+size-1) - zone_map_min_address);
1070
1071 for (zp = zone_page_table + i; i <= j; zp++, i++) {
1072 /*
1073 * Set alloc_count to (ZONE_PAGE_USED + 1) if
1074 * it was previously set to ZONE_PAGE_UNUSED.
1075 */
1076 if (zp->alloc_count == ZONE_PAGE_UNUSED)
1077 zp->alloc_count = 1;
1078 else
1079 ++zp->alloc_count;
1080 }
1081 }
1082
1083 void
1084 zone_page_free_element(
1085 struct zone_page_table_entry **free_pages,
1086 vm_offset_t addr,
1087 vm_size_t size)
1088 {
1089 struct zone_page_table_entry *zp;
1090 natural_t i, j;
1091
1092 #if MACH_ASSERT
1093 if (!from_zone_map(addr, size))
1094 panic("zone_page_free_element");
1095 #endif
1096
1097 i = atop_32(addr-zone_map_min_address);
1098 j = atop_32((addr+size-1) - zone_map_min_address);
1099
1100 for (zp = zone_page_table + i; i <= j; zp++, i++) {
1101 if (zp->collect_count > 0)
1102 --zp->collect_count;
1103 if (--zp->alloc_count == 0) {
1104 zp->alloc_count = ZONE_PAGE_UNUSED;
1105 zp->collect_count = 0;
1106
1107 zp->link = *free_pages;
1108 *free_pages = zp;
1109 }
1110 }
1111 }
1112
1113
1114 /* This is used for walking through a zone's free element list.
1115 */
1116 struct zone_free_element {
1117 struct zone_free_element * next;
1118 };
1119
1120 struct {
1121 uint32_t pgs_freed;
1122
1123 uint32_t elems_collected,
1124 elems_freed,
1125 elems_kept;
1126 } zgc_stats;
1127
1128 /* Zone garbage collection
1129 *
1130 * zone_gc will walk through all the free elements in all the
1131 * zones that are marked collectable looking for reclaimable
1132 * pages. zone_gc is called by consider_zone_gc when the system
1133 * begins to run out of memory.
1134 */
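
/*
 * Outline of the collection below, derived from the code that follows:
 * each collectable zone's free list is detached; pass 1 bumps the
 * per-page collect_count for every free element (foreign elements go
 * straight back to the zone); pass 2 frees the elements on pages where
 *
 *	collect_count == alloc_count
 *
 * (i.e. every element on the page is free) and threads everything else
 * back onto the zone's free list.  Fully-free pages are then returned
 * to the VM system with kmem_free().
 */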
1135 void
1136 zone_gc(void)
1137 {
1138 unsigned int max_zones;
1139 zone_t z;
1140 unsigned int i;
1141 struct zone_page_table_entry *zp, *zone_free_pages;
1142
1143 mutex_lock(&zone_gc_lock);
1144
1145 simple_lock(&all_zones_lock);
1146 max_zones = num_zones;
1147 z = first_zone;
1148 simple_unlock(&all_zones_lock);
1149
1150 #if MACH_ASSERT
1151 for (i = 0; i < zone_pages; i++)
1152 assert(zone_page_table[i].collect_count == 0);
1153 #endif /* MACH_ASSERT */
1154
1155 zone_free_pages = NULL;
1156
1157 for (i = 0; i < max_zones; i++, z = z->next_zone) {
1158 unsigned int n;
1159 vm_size_t elt_size, size_freed;
1160 struct zone_free_element *elt, *prev, *scan, *keep, *tail;
1161
1162 assert(z != ZONE_NULL);
1163
1164 if (!z->collectable)
1165 continue;
1166
1167 lock_zone(z);
1168
1169 elt_size = z->elem_size;
1170
1171 /*
1172 	 * Do a quick feasibility check before we scan the zone:
1173 	 * skip it unless we are likely to get at least one page back.
1174 */
1175 if (z->cur_size - z->count * elt_size <= 2 * PAGE_SIZE){
1176 unlock_zone(z);
1177 continue;
1178 }
1179
1180 /*
1181 * Snatch all of the free elements away from the zone.
1182 */
1183
1184 scan = (void *)z->free_elements;
1185 		z->free_elements = 0;
1186
1187 unlock_zone(z);
1188
1189 /*
1190 * Pass 1:
1191 *
1192 * Determine which elements we can attempt to collect
1193 * and count them up in the page table. Foreign elements
1194 * are returned to the zone.
1195 */
1196
1197 prev = (void *)&scan;
1198 elt = scan;
1199 n = 0; tail = keep = NULL;
1200 while (elt != NULL) {
1201 if (from_zone_map(elt, elt_size)) {
1202 zone_page_collect((vm_offset_t)elt, elt_size);
1203
1204 prev = elt;
1205 elt = elt->next;
1206
1207 ++zgc_stats.elems_collected;
1208 }
1209 else {
1210 if (keep == NULL)
1211 keep = tail = elt;
1212 else
1213 tail = tail->next = elt;
1214
1215 elt = prev->next = elt->next;
1216 tail->next = NULL;
1217 }
1218
1219 /*
1220 * Dribble back the elements we are keeping.
1221 */
1222
1223 if (++n >= 50 && keep != NULL) {
1224 lock_zone(z);
1225
1226 tail->next = (void *)z->free_elements;
1227 				z->free_elements = (vm_offset_t) keep;
1228
1229 unlock_zone(z);
1230
1231 n = 0; tail = keep = NULL;
1232 }
1233 }
1234
1235 /*
1236 * Return any remaining elements.
1237 */
1238
1239 if (keep != NULL) {
1240 lock_zone(z);
1241
1242 tail->next = (void *)z->free_elements;
1243 			z->free_elements = (vm_offset_t) keep;
1244
1245 unlock_zone(z);
1246 }
1247
1248 /*
1249 * Pass 2:
1250 *
1251 * Determine which pages we can reclaim and
1252 * free those elements.
1253 */
1254
1255 size_freed = 0;
1256 prev = (void *)&scan;
1257 elt = scan;
1258 n = 0; tail = keep = NULL;
1259 while (elt != NULL) {
1260 if (zone_page_collectable((vm_offset_t)elt, elt_size)) {
1261 size_freed += elt_size;
1262 zone_page_free_element(&zone_free_pages,
1263 (vm_offset_t)elt, elt_size);
1264
1265 elt = prev->next = elt->next;
1266
1267 ++zgc_stats.elems_freed;
1268 }
1269 else {
1270 zone_page_keep((vm_offset_t)elt, elt_size);
1271
1272 if (keep == NULL)
1273 keep = tail = elt;
1274 else
1275 tail = tail->next = elt;
1276
1277 elt = prev->next = elt->next;
1278 tail->next = NULL;
1279
1280 ++zgc_stats.elems_kept;
1281 }
1282
1283 /*
1284 * Dribble back the elements we are keeping,
1285 * and update the zone size info.
1286 */
1287
1288 if (++n >= 50 && keep != NULL) {
1289 lock_zone(z);
1290
1291 z->cur_size -= size_freed;
1292 size_freed = 0;
1293
1294 tail->next = (void *)z->free_elements;
1295 				z->free_elements = (vm_offset_t) keep;
1296
1297 unlock_zone(z);
1298
1299 n = 0; tail = keep = NULL;
1300 }
1301 }
1302
1303 /*
1304 * Return any remaining elements, and update
1305 * the zone size info.
1306 */
1307
1308 if (size_freed > 0 || keep != NULL) {
1309 lock_zone(z);
1310
1311 z->cur_size -= size_freed;
1312
1313 if (keep != NULL) {
1314 tail->next = (void *)z->free_elements;
1315 				z->free_elements = (vm_offset_t) keep;
1316 }
1317
1318 unlock_zone(z);
1319 }
1320 }
1321
1322 /*
1323 * Reclaim the pages we are freeing.
1324 */
1325
1326 while ((zp = zone_free_pages) != NULL) {
1327 zone_free_pages = zp->link;
1328 kmem_free(zone_map, zone_map_min_address + PAGE_SIZE *
1329 (zp - zone_page_table), PAGE_SIZE);
1330 ++zgc_stats.pgs_freed;
1331 }
1332
1333 mutex_unlock(&zone_gc_lock);
1334 }
1335
1336 /*
1337 * consider_zone_gc:
1338 *
1339 * Called by the pageout daemon when the system needs more free pages.
1340 */
1341
1342 void
1343 consider_zone_gc(void)
1344 {
1345 /*
1346 * By default, don't attempt zone GC more frequently
1347 	 * than once every 2 seconds.
1348 */
1349
1350 if (zone_gc_max_rate == 0)
1351 zone_gc_max_rate = (2 << SCHED_TICK_SHIFT) + 1;
1352
1353 if (zone_gc_allowed &&
1354 ((sched_tick > (zone_gc_last_tick + zone_gc_max_rate)) ||
1355 zone_gc_forced)) {
1356 zone_gc_forced = FALSE;
1357 zone_gc_last_tick = sched_tick;
1358 zone_gc();
1359 }
1360 }
1361
1362 #include <mach/kern_return.h>
1363 #include <mach/machine/vm_types.h>
1364 #include <mach_debug/zone_info.h>
1365 #include <kern/host.h>
1366 #include <vm/vm_map.h>
1367 #include <vm/vm_kern.h>
1368
1369 #include <mach/mach_host_server.h>
1370
1371 kern_return_t
1372 host_zone_info(
1373 host_t host,
1374 zone_name_array_t *namesp,
1375 mach_msg_type_number_t *namesCntp,
1376 zone_info_array_t *infop,
1377 mach_msg_type_number_t *infoCntp)
1378 {
1379 zone_name_t *names;
1380 vm_offset_t names_addr;
1381 vm_size_t names_size;
1382 zone_info_t *info;
1383 vm_offset_t info_addr;
1384 vm_size_t info_size;
1385 unsigned int max_zones, i;
1386 zone_t z;
1387 zone_name_t *zn;
1388 zone_info_t *zi;
1389 kern_return_t kr;
1390
1391 if (host == HOST_NULL)
1392 return KERN_INVALID_HOST;
1393
1394 /*
1395 * We assume that zones aren't freed once allocated.
1396 * We won't pick up any zones that are allocated later.
1397 */
1398
1399 simple_lock(&all_zones_lock);
1400 #ifdef ppc
1401 max_zones = num_zones + 4;
1402 #else
1403 max_zones = num_zones + 2;
1404 #endif
1405 z = first_zone;
1406 simple_unlock(&all_zones_lock);
1407
1408 if (max_zones <= *namesCntp) {
1409 /* use in-line memory */
1410
1411 names = *namesp;
1412 } else {
1413 names_size = round_page_32(max_zones * sizeof *names);
1414 kr = kmem_alloc_pageable(ipc_kernel_map,
1415 &names_addr, names_size);
1416 if (kr != KERN_SUCCESS)
1417 return kr;
1418 names = (zone_name_t *) names_addr;
1419 }
1420
1421 if (max_zones <= *infoCntp) {
1422 /* use in-line memory */
1423
1424 info = *infop;
1425 } else {
1426 info_size = round_page_32(max_zones * sizeof *info);
1427 kr = kmem_alloc_pageable(ipc_kernel_map,
1428 &info_addr, info_size);
1429 if (kr != KERN_SUCCESS) {
1430 if (names != *namesp)
1431 kmem_free(ipc_kernel_map,
1432 names_addr, names_size);
1433 return kr;
1434 }
1435
1436 info = (zone_info_t *) info_addr;
1437 }
1438 zn = &names[0];
1439 zi = &info[0];
1440
1441 for (i = 0; i < num_zones; i++) {
1442 struct zone zcopy;
1443
1444 assert(z != ZONE_NULL);
1445
1446 lock_zone(z);
1447 zcopy = *z;
1448 unlock_zone(z);
1449
1450 simple_lock(&all_zones_lock);
1451 z = z->next_zone;
1452 simple_unlock(&all_zones_lock);
1453
1454 /* assuming here the name data is static */
1455 (void) strncpy(zn->zn_name, zcopy.zone_name,
1456 sizeof zn->zn_name);
1457
1458 zi->zi_count = zcopy.count;
1459 zi->zi_cur_size = zcopy.cur_size;
1460 zi->zi_max_size = zcopy.max_size;
1461 zi->zi_elem_size = zcopy.elem_size;
1462 zi->zi_alloc_size = zcopy.alloc_size;
1463 zi->zi_exhaustible = zcopy.exhaustible;
1464 zi->zi_collectable = zcopy.collectable;
1465
1466 zn++;
1467 zi++;
1468 }
1469 strcpy(zn->zn_name, "kernel_stacks");
1470 stack_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
1471 &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
1472 zn++;
1473 zi++;
1474 #ifdef ppc
1475 strcpy(zn->zn_name, "save_areas");
1476 save_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
1477 &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
1478 zn++;
1479 zi++;
1480
1481 strcpy(zn->zn_name, "pmap_mappings");
1482 mapping_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
1483 &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
1484 zn++;
1485 zi++;
1486 #endif
1487 strcpy(zn->zn_name, "kalloc.large");
1488 kalloc_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
1489 &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
1490
1491 if (names != *namesp) {
1492 vm_size_t used;
1493 vm_map_copy_t copy;
1494
1495 used = max_zones * sizeof *names;
1496
1497 if (used != names_size)
1498 bzero((char *) (names_addr + used), names_size - used);
1499
1500 kr = vm_map_copyin(ipc_kernel_map, names_addr, names_size,
1501 TRUE, &copy);
1502 assert(kr == KERN_SUCCESS);
1503
1504 *namesp = (zone_name_t *) copy;
1505 }
1506 *namesCntp = max_zones;
1507
1508 if (info != *infop) {
1509 vm_size_t used;
1510 vm_map_copy_t copy;
1511
1512 used = max_zones * sizeof *info;
1513
1514 if (used != info_size)
1515 bzero((char *) (info_addr + used), info_size - used);
1516
1517 kr = vm_map_copyin(ipc_kernel_map, info_addr, info_size,
1518 TRUE, &copy);
1519 assert(kr == KERN_SUCCESS);
1520
1521 *infop = (zone_info_t *) copy;
1522 }
1523 *infoCntp = max_zones;
1524
1525 return KERN_SUCCESS;
1526 }
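
/*
 * A sketch of how a user-level tool (zprint, for instance) might call
 * this MIG routine; the variable names and the lack of error handling
 * are assumptions of this note, not part of the original source.
 *
 *	zone_name_array_t	names;
 *	zone_info_array_t	info;
 *	mach_msg_type_number_t	nameCnt = 0, infoCnt = 0;
 *
 *	if (host_zone_info(mach_host_self(), &names, &nameCnt,
 *			   &info, &infoCnt) == KERN_SUCCESS) {
 *		... walk names[0..nameCnt-1] and info[0..infoCnt-1],
 *		    then vm_deallocate() the out-of-line arrays ...
 *	}
 */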
1527
1528 #if MACH_KDB
1529 #include <ddb/db_command.h>
1530 #include <ddb/db_output.h>
1531 #include <kern/kern_print.h>
1532
1533 const char *zone_labels =
1534 "ENTRY COUNT TOT_SZ MAX_SZ ELT_SZ ALLOC_SZ NAME";
1535
1536 /* Forwards */
1537 void db_print_zone(
1538 zone_t addr);
1539
1540 #if ZONE_DEBUG
1541 void db_zone_check_active(
1542 zone_t zone);
1543 void db_zone_print_active(
1544 zone_t zone);
1545 #endif /* ZONE_DEBUG */
1546 void db_zone_print_free(
1547 zone_t zone);
1548 void
1549 db_print_zone(
1550 zone_t addr)
1551 {
1552 struct zone zcopy;
1553
1554 zcopy = *addr;
1555
1556 db_printf("%8x %8x %8x %8x %6x %8x %s ",
1557 addr, zcopy.count, zcopy.cur_size,
1558 zcopy.max_size, zcopy.elem_size,
1559 zcopy.alloc_size, zcopy.zone_name);
1560 if (zcopy.exhaustible)
1561 db_printf("H");
1562 if (zcopy.collectable)
1563 db_printf("C");
1564 if (zcopy.expandable)
1565 db_printf("X");
1566 db_printf("\n");
1567 }
1568
1569 /*ARGSUSED*/
1570 void
1571 db_show_one_zone(
1572 db_expr_t addr,
1573 int have_addr,
1574 db_expr_t count,
1575 char * modif)
1576 {
1577 struct zone *z = (zone_t)addr;
1578
1579 if (z == ZONE_NULL || !have_addr){
1580 db_error("No Zone\n");
1581 /*NOTREACHED*/
1582 }
1583
1584 db_printf("%s\n", zone_labels);
1585 db_print_zone(z);
1586 }
1587
1588 /*ARGSUSED*/
1589 void
1590 db_show_all_zones(
1591 db_expr_t addr,
1592 int have_addr,
1593 db_expr_t count,
1594 char * modif)
1595 {
1596 zone_t z;
1597 unsigned total = 0;
1598
1599 /*
1600 * Don't risk hanging by unconditionally locking,
1601 	 * since the risk of incoherent data is small (zones aren't freed).
1602 */
1603 have_addr = simple_lock_try(&all_zones_lock);
1604 count = num_zones;
1605 z = first_zone;
1606 if (have_addr) {
1607 simple_unlock(&all_zones_lock);
1608 }
1609
1610 db_printf("%s\n", zone_labels);
1611 for ( ; count > 0; count--) {
1612 if (!z) {
1613 db_error("Mangled Zone List\n");
1614 /*NOTREACHED*/
1615 }
1616 db_print_zone(z);
1617 		total += z->cur_size;
1618
1619 have_addr = simple_lock_try(&all_zones_lock);
1620 z = z->next_zone;
1621 if (have_addr) {
1622 simple_unlock(&all_zones_lock);
1623 }
1624 }
1625 db_printf("\nTotal %8x", total);
1626 db_printf("\n\nzone_gc() has reclaimed %d pages\n", zgc_stats.pgs_freed);
1627 }
1628
1629 #if ZONE_DEBUG
1630 void
1631 db_zone_check_active(
1632 zone_t zone)
1633 {
1634 int count = 0;
1635 queue_t tmp_elem;
1636
1637 if (!zone_debug_enabled(zone) || !zone_check)
1638 return;
1639 tmp_elem = queue_first(&zone->active_zones);
1640 while (count < zone->count) {
1641 count++;
1642 if (tmp_elem == 0) {
1643 printf("unexpected zero element, zone=0x%x, count=%d\n",
1644 zone, count);
1645 assert(FALSE);
1646 break;
1647 }
1648 if (queue_end(tmp_elem, &zone->active_zones)) {
1649 printf("unexpected queue_end, zone=0x%x, count=%d\n",
1650 zone, count);
1651 assert(FALSE);
1652 break;
1653 }
1654 tmp_elem = queue_next(tmp_elem);
1655 }
1656 if (!queue_end(tmp_elem, &zone->active_zones)) {
1657 printf("not at queue_end, zone=0x%x, tmp_elem=0x%x\n",
1658 zone, tmp_elem);
1659 assert(FALSE);
1660 }
1661 }
1662
1663 void
1664 db_zone_print_active(
1665 zone_t zone)
1666 {
1667 int count = 0;
1668 queue_t tmp_elem;
1669
1670 if (!zone_debug_enabled(zone)) {
1671 printf("zone 0x%x debug not enabled\n", zone);
1672 return;
1673 }
1674 if (!zone_check) {
1675 printf("zone_check FALSE\n");
1676 return;
1677 }
1678
1679 printf("zone 0x%x, active elements %d\n", zone, zone->count);
1680 printf("active list:\n");
1681 tmp_elem = queue_first(&zone->active_zones);
1682 while (count < zone->count) {
1683 printf(" 0x%x", tmp_elem);
1684 count++;
1685 if ((count % 6) == 0)
1686 printf("\n");
1687 if (tmp_elem == 0) {
1688 printf("\nunexpected zero element, count=%d\n", count);
1689 break;
1690 }
1691 if (queue_end(tmp_elem, &zone->active_zones)) {
1692 printf("\nunexpected queue_end, count=%d\n", count);
1693 break;
1694 }
1695 tmp_elem = queue_next(tmp_elem);
1696 }
1697 if (!queue_end(tmp_elem, &zone->active_zones))
1698 printf("\nnot at queue_end, tmp_elem=0x%x\n", tmp_elem);
1699 else
1700 printf("\n");
1701 }
1702 #endif /* ZONE_DEBUG */
1703
1704 void
1705 db_zone_print_free(
1706 zone_t zone)
1707 {
1708 int count = 0;
1709 int freecount;
1710 vm_offset_t elem;
1711
1712 freecount = zone_free_count(zone);
1713 printf("zone 0x%x, free elements %d\n", zone, freecount);
1714 printf("free list:\n");
1715 elem = zone->free_elements;
1716 while (count < freecount) {
1717 printf(" 0x%x", elem);
1718 count++;
1719 if ((count % 6) == 0)
1720 printf("\n");
1721 if (elem == 0) {
1722 printf("\nunexpected zero element, count=%d\n", count);
1723 break;
1724 }
1725 elem = *((vm_offset_t *)elem);
1726 }
1727 if (elem != 0)
1728 printf("\nnot at end of free list, elem=0x%x\n", elem);
1729 else
1730 printf("\n");
1731 }
1732
1733 #endif /* MACH_KDB */
1734
1735
1736 #if ZONE_DEBUG
1737
1738 /* should we care about locks here ? */
1739
1740 #if MACH_KDB
1741 vm_offset_t
1742 next_element(
1743 zone_t z,
1744 vm_offset_t elt)
1745 {
1746 if (!zone_debug_enabled(z))
1747 return(0);
1748 elt -= ZONE_DEBUG_OFFSET;
1749 elt = (vm_offset_t) queue_next((queue_t) elt);
1750 if ((queue_t) elt == &z->active_zones)
1751 return(0);
1752 elt += ZONE_DEBUG_OFFSET;
1753 return(elt);
1754 }
1755
1756 vm_offset_t
1757 first_element(
1758 zone_t z)
1759 {
1760 vm_offset_t elt;
1761
1762 if (!zone_debug_enabled(z))
1763 return(0);
1764 if (queue_empty(&z->active_zones))
1765 return(0);
1766 elt = (vm_offset_t) queue_first(&z->active_zones);
1767 elt += ZONE_DEBUG_OFFSET;
1768 return(elt);
1769 }
1770
1771 /*
1772 * Second arg controls how many zone elements are printed:
1773 * 0 => none
1774 * n, n < 0 => all
1775 * n, n > 0 => last n on active list
1776 */
1777 int
1778 zone_count(
1779 zone_t z,
1780 int tail)
1781 {
1782 vm_offset_t elt;
1783 int count = 0;
1784 boolean_t print = (tail != 0);
1785
1786 if (tail < 0)
1787 tail = z->count;
1788 if (z->count < tail)
1789 tail = 0;
1790 tail = z->count - tail;
1791 for (elt = first_element(z); elt; elt = next_element(z, elt)) {
1792 if (print && tail <= count)
1793 db_printf("%8x\n", elt);
1794 count++;
1795 }
1796 assert(count == z->count);
1797 return(count);
1798 }
1799 #endif /* MACH_KDB */
1800
1801 #define zone_in_use(z) ( z->count || z->free_elements )
1802
1803 void
1804 zone_debug_enable(
1805 zone_t z)
1806 {
1807 if (zone_debug_enabled(z) || zone_in_use(z) ||
1808 z->alloc_size < (z->elem_size + ZONE_DEBUG_OFFSET))
1809 return;
1810 queue_init(&z->active_zones);
1811 z->elem_size += ZONE_DEBUG_OFFSET;
1812 }
1813
1814 void
1815 zone_debug_disable(
1816 zone_t z)
1817 {
1818 if (!zone_debug_enabled(z) || zone_in_use(z))
1819 return;
1820 z->elem_size -= ZONE_DEBUG_OFFSET;
1821 z->active_zones.next = z->active_zones.prev = 0;
1822 }
1823 #endif /* ZONE_DEBUG */