/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
*
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the
+ * License may not be used to create, or enable the creation or
+ * redistribution of, unlawful or unlicensed copies of an Apple operating
+ * system, or to circumvent, violate, or enable the circumvention or
+ * violation of, any terms of an Apple operating system software license
+ * agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
* file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
#include <zone_debug.h>
#include <norma_vm.h>
#include <mach_kdb.h>
-#include <kern/ast.h>
+
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+#include <mach/kern_return.h>
+#include <mach/mach_host_server.h>
+#include <mach/machine/vm_types.h>
+#include <mach_debug/zone_info.h>
+
+#include <kern/kern_types.h>
#include <kern/assert.h>
+#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/lock.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
-#include <mach/vm_param.h>
+#include <kern/kalloc.h>
+
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
#include <machine/machparam.h>
+#if defined(__ppc__)
+/* for fake zone stat routines */
+#include <ppc/savearea.h>
+#include <ppc/mappings.h>
+#endif
#if MACH_ASSERT
/* Detect use of zone elt after freeing it by two methods:
#if defined(__alpha)
#define is_kernel_data_addr(a) \
- (!(a) || IS_SYS_VA(a) && !((a) & (sizeof(long)-1)))
+ (!(a) || (IS_SYS_VA(a) && !((a) & (sizeof(long)-1))))
#else /* !defined(__alpha) */
#define is_kernel_data_addr(a) \
- (!(a) || (a) >= VM_MIN_KERNEL_ADDRESS && !((a) & 0x3))
+ (!(a) || ((a) >= vm_min_kernel_address && !((a) & 0x3)))
#endif /* defined(__alpha) */
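+/*
+ * The added parentheses only make the precedence explicit ('&&' already
+ * binds tighter than '||'); the test is unchanged: NULL is accepted,
+ * otherwise the address must lie in kernel space and be word-aligned.
+ * Note the kernel base is now read from the vm_min_kernel_address
+ * variable rather than the VM_MIN_KERNEL_ADDRESS constant.
+ */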
#define ADD_TO_ZONE(zone, element) \
MACRO_BEGIN \
if (zfree_clear) \
- { int i; \
+ { unsigned int i; \
for (i=1; \
i < zone->elem_size/sizeof(vm_offset_t) - 1; \
i++) \
#define lock_zone(zone) \
MACRO_BEGIN \
- simple_lock(&(zone)->lock); \
+ mutex_lock(&(zone)->lock); \
MACRO_END
#define unlock_zone(zone) \
MACRO_BEGIN \
- simple_unlock(&(zone)->lock); \
+ mutex_unlock(&(zone)->lock); \
MACRO_END
#define zone_wakeup(zone) thread_wakeup((event_t)(zone))
#define zone_sleep(zone) \
- thread_sleep_simple_lock((event_t)(zone), \
+ thread_sleep_mutex((event_t)(zone), \
&(zone)->lock, \
THREAD_UNINT)
#define lock_zone_init(zone) \
MACRO_BEGIN \
- simple_lock_init(&zone->lock, ETAP_MISC_ZONE); \
+ mutex_init(&zone->lock, 0); \
MACRO_END
-#define lock_try_zone(zone) simple_lock_try(&zone->lock)
+#define lock_try_zone(zone) mutex_try(&zone->lock)
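+/*
+ * The zone lock is now a mutex rather than a simple lock, so the holder
+ * may block: zone_sleep() drops the mutex while waiting via
+ * thread_sleep_mutex(), and zalloc_canblock() depends on that when it
+ * sleeps until zone_gc() hands free elements back.
+ */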
kern_return_t zget_space(
vm_offset_t size,
struct zone_page_table_entry * zone_page_table;
vm_offset_t zone_map_min_address;
vm_offset_t zone_map_max_address;
-integer_t zone_pages;
+unsigned int zone_pages;
/*
* Exclude more than one concurrent garbage collection
decl_simple_lock_data(, all_zones_lock)
zone_t first_zone;
zone_t *last_zone;
-int num_zones;
+unsigned int num_zones;
boolean_t zone_gc_allowed = TRUE;
boolean_t zone_gc_forced = FALSE;
vm_size_t size, /* the size of an element */
vm_size_t max, /* maximum memory to use */
vm_size_t alloc, /* allocation size */
- char *name) /* a name for the zone */
+ const char *name) /* a name for the zone */
{
zone_t z;
((size-1) % sizeof(z->free_elements));
if (alloc == 0)
alloc = PAGE_SIZE;
- alloc = round_page_32(alloc);
- max = round_page_32(max);
+ alloc = round_page(alloc);
+ max = round_page(max);
/*
- * We look for an allocation size with least fragmentation
- * in the range of 1 - 5 pages. This size will be used unless
+ * We look for an allocation size with less than 1% waste
+ * up to 5 pages in size...
+ * otherwise, we look for an allocation size with least fragmentation
+ * in the range of 1 - 5 pages.
+ * This size will be used unless
* the user suggestion is larger AND has less fragmentation
*/
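+ /*
+ * Worked example (illustrative, assuming PAGE_SIZE == 4096 and an
+ * elem_size of 120): one page wastes 4096 % 120 == 16 bytes, which is
+ * under the 1% cutoff of 4096 / 100 == 40, so the loop below takes the
+ * early goto on its first pass and alloc stays at a single page.
+ */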
{ vm_size_t best, waste; unsigned int i;
best = PAGE_SIZE;
waste = best % size;
- for (i = 2; i <= 5; i++){ vm_size_t tsize, twaste;
- tsize = i * PAGE_SIZE;
+
+ for (i = 1; i <= 5; i++) {
+ vm_size_t tsize, twaste;
+
+ tsize = i * PAGE_SIZE;
+
+ if ((tsize % size) < (tsize / 100)) {
+ alloc = tsize;
+ goto use_this_allocation;
+ }
twaste = tsize % size;
if (twaste < waste)
best = tsize, waste = twaste;
if (alloc <= best || (alloc % size >= waste))
alloc = best;
}
+use_this_allocation:
if (max && (max < alloc))
max = alloc;
z->zone_name = name;
z->count = 0;
z->doing_alloc = FALSE;
+ z->doing_gc = FALSE;
z->exhaustible = FALSE;
z->collectable = TRUE;
z->allows_foreign = FALSE;
void
zcram(
register zone_t zone,
- vm_offset_t newmem,
+ void *newaddr,
vm_size_t size)
{
register vm_size_t elem_size;
+ vm_offset_t newmem = (vm_offset_t) newaddr;
/* Basic sanity checks */
assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
vm_offset_t *result)
{
vm_offset_t new_space = 0;
- vm_size_t space_to_add;
+ vm_size_t space_to_add = 0;
simple_lock(&zget_space_lock);
while ((zalloc_next_space + size) > zalloc_end_of_space) {
* Add at least one page to allocation area.
*/
- space_to_add = round_page_32(size);
+ space_to_add = round_page(size);
if (new_space == 0) {
kern_return_t retval;
void
zone_steal_memory(void)
{
- zdata_size = round_page_32(128*sizeof(struct zone));
- zdata = pmap_steal_memory(zdata_size);
+ zdata_size = round_page(128*sizeof(struct zone));
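+ /*
+ * The (char *)p - (char *)0 idiom below converts the pointer returned
+ * by pmap_steal_memory() into a byte offset, yielding a vm_offset_t
+ * without a direct pointer-to-integer cast.
+ */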
+ zdata = (vm_offset_t)((char *)pmap_steal_memory(zdata_size) - (char *)0);
}
if (nelem <= 0)
return 0;
size = nelem * zone->elem_size;
- size = round_page_32(size);
+ size = round_page(size);
kr = kmem_alloc_wired(kernel_map, &memory, size);
if (kr != KERN_SUCCESS)
return 0;
zone_change(zone, Z_FOREIGN, TRUE);
- zcram(zone, memory, size);
+ zcram(zone, (void *)memory, size);
nalloc = size / zone->elem_size;
assert(nalloc >= nelem);
vm_size_t zone_zone_size;
vm_offset_t zone_zone_space;
- simple_lock_init(&all_zones_lock, ETAP_MISC_ZONE_ALL);
+ simple_lock_init(&all_zones_lock, 0);
first_zone = ZONE_NULL;
last_zone = &first_zone;
num_zones = 0;
- simple_lock_init(&zget_space_lock, ETAP_MISC_ZONE_GET);
+ simple_lock_init(&zget_space_lock, 0);
zalloc_next_space = zdata;
zalloc_end_of_space = zdata + zdata_size;
zalloc_wasted_space = 0;
zone_change(zone_zone, Z_COLLECT, FALSE);
zone_zone_size = zalloc_end_of_space - zalloc_next_space;
zget_space(zone_zone_size, &zone_zone_space);
- zcram(zone_zone, zone_zone_space, zone_zone_size);
+ zcram(zone_zone, (void *)zone_zone_space, zone_zone_size);
}
void
vm_size_t zone_table_size;
retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
- FALSE, TRUE, &zone_map);
+ FALSE, VM_FLAGS_ANYWHERE, &zone_map);
+
if (retval != KERN_SUCCESS)
panic("zone_init: kmem_suballoc failed");
- zone_max = zone_min + round_page_32(max_zonemap_size);
+ zone_max = zone_min + round_page(max_zonemap_size);
/*
* Setup garbage collection information:
*/
if (kmem_alloc_wired(zone_map, (vm_offset_t *) &zone_page_table,
zone_table_size) != KERN_SUCCESS)
panic("zone_init");
- zone_min = (vm_offset_t)zone_page_table + round_page_32(zone_table_size);
+ zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size);
zone_pages = atop_32(zone_max - zone_min);
zone_map_min_address = zone_min;
zone_map_max_address = zone_max;
- mutex_init(&zone_gc_lock, ETAP_NO_TRACE);
+ mutex_init(&zone_gc_lock, 0);
zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
}
/*
* zalloc returns an element from the specified zone.
*/
-vm_offset_t
+void *
zalloc_canblock(
register zone_t zone,
boolean_t canblock)
kern_return_t retval;
assert(zone != ZONE_NULL);
- check_simple_locks();
lock_zone(zone);
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
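+ /*
+ * If zone_gc() is currently reaping this zone's free list, wait for
+ * it to dribble elements back instead of growing the zone; zone_gc()
+ * wakes waiters each time it returns a batch to the free list.
+ */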
+ while ((addr == 0) && canblock && (zone->doing_gc)) {
+ zone->waiting = TRUE;
+ zone_sleep(zone);
+ REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ }
+
while ((addr == 0) && canblock) {
/*
* If nothing was there, try to get more
if (vm_pool_low() || retry == TRUE)
alloc_size =
- round_page_32(zone->elem_size);
+ round_page(zone->elem_size);
else
alloc_size = zone->alloc_size;
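+ /*
+ * Note that under memory pressure (or on a retry after a resource
+ * shortage) the block above requests just enough pages for a single
+ * element rather than the zone's preferred alloc_size.
+ */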
if (retval == KERN_SUCCESS) {
zone_page_init(space, alloc_size,
ZONE_PAGE_USED);
- zcram(zone, space, alloc_size);
+ zcram(zone, (void *)space, alloc_size);
break;
} else if (retval != KERN_RESOURCE_SHORTAGE) {
/* would like to cause a zone_gc() */
if (retry == TRUE)
- panic("zalloc");
+ panic("zalloc: \"%s\" (%d elements) retry fail %d", zone->zone_name, zone->count, retval);
retry = TRUE;
+ } else {
+ break;
}
}
lock_zone(zone);
if (zone_debug_enabled(zone))
space += ZONE_DEBUG_OFFSET;
#endif
- return(space);
+ return((void *)space);
}
if (retval == KERN_RESOURCE_SHORTAGE) {
unlock_zone(zone);
VM_PAGE_WAIT();
lock_zone(zone);
} else {
- panic("zalloc");
+ panic("zalloc: \"%s\" (%d elements) zget_space returned %d", zone->zone_name, zone->count, retval);
}
}
}
unlock_zone(zone);
- return(addr);
+ return((void *)addr);
}
-vm_offset_t
+void *
zalloc(
register zone_t zone)
{
return( zalloc_canblock(zone, TRUE) );
}
-vm_offset_t
+void *
zalloc_noblock(
register zone_t zone)
{
void
zalloc_async(
- thread_call_param_t p0,
- thread_call_param_t p1)
+ thread_call_param_t p0,
+ __unused thread_call_param_t p1)
{
- vm_offset_t elt;
+ void *elt;
elt = zalloc_canblock((zone_t)p0, TRUE);
zfree((zone_t)p0, elt);
* This form should be used when you cannot block (like when
* processing an interrupt).
*/
-vm_offset_t
+void *
zget(
register zone_t zone)
{
assert( zone != ZONE_NULL );
if (!lock_try_zone(zone))
- return ((vm_offset_t)0);
+ return NULL;
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
#if ZONE_DEBUG
#endif /* ZONE_DEBUG */
unlock_zone(zone);
- return(addr);
+ return((void *) addr);
}
/* Keep this FALSE by default. Large memory machines run orders of magnitude
void
zfree(
register zone_t zone,
- vm_offset_t elem)
+ void *addr)
{
+ vm_offset_t elem = (vm_offset_t) addr;
#if MACH_ASSERT
/* Basic sanity checks */
!from_zone_map(elem, zone->elem_size)) {
#if MACH_ASSERT
panic("zfree: non-allocated memory in collectable zone!");
-#else
+#endif
zone_last_bogus_zone = zone;
zone_last_bogus_elem = elem;
return;
-#endif
}
lock_zone(zone);
if (kmem_alloc_wired(zone_map, &addr, size) != KERN_SUCCESS)
panic("zprealloc");
zone_page_init(addr, size, ZONE_PAGE_USED);
- zcram(zone, addr, size);
+ zcram(zone, (void *)addr, size);
}
}
zone_free_pages = NULL;
for (i = 0; i < max_zones; i++, z = z->next_zone) {
- unsigned int n;
+ unsigned int n, m;
vm_size_t elt_size, size_freed;
- struct zone_free_element *elt, *prev, *scan, *keep, *tail;
+ struct zone_free_element *elt, *base_elt, *base_prev, *prev, *scan, *keep, *tail;
assert(z != ZONE_NULL);
/*
* Do a quick feasibility check before we scan the zone:
- * skip unless there is likelihood of getting 1+ pages back.
+ * skip unless there is likelihood of getting pages back
+ * (i.e. we need a whole allocation block's worth of free
+ * elements before we can garbage collect) and
+ * the zone has more than 10 percent of its elements free
*/
- if (z->cur_size - z->count * elt_size <= 2 * PAGE_SIZE){
+ if (((z->cur_size - z->count * elt_size) <= (2 * z->alloc_size)) ||
+ ((z->cur_size - z->count * elt_size) <= (z->cur_size / 10))) {
unlock_zone(z);
continue;
}
+ z->doing_gc = TRUE;
+
/*
* Snatch all of the free elements away from the zone.
*/
scan = (void *)z->free_elements;
- (void *)z->free_elements = NULL;
+ z->free_elements = 0;
unlock_zone(z);
* Dribble back the elements we are keeping.
*/
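+ /*
+ * Every 50 elements examined, if an allocator is waiting on the zone,
+ * hand a batch back: the elements already marked to keep if there are
+ * any, otherwise up to 50 unscanned elements spliced off this private
+ * list, then wake the waiter.
+ */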
- if (++n >= 50 && keep != NULL) {
- lock_zone(z);
-
- tail->next = (void *)z->free_elements;
- (void *)z->free_elements = keep;
+ if (++n >= 50) {
+ if (z->waiting == TRUE) {
+ lock_zone(z);
+
+ if (keep != NULL) {
+ tail->next = (void *)z->free_elements;
+ z->free_elements = (vm_offset_t) keep;
+ tail = keep = NULL;
+ } else {
+ m = 0;
+ base_elt = elt;
+ base_prev = prev;
+ while ((elt != NULL) && (++m < 50)) {
+ prev = elt;
+ elt = elt->next;
+ }
+ if (m != 0) {
+ prev->next = (void *)z->free_elements;
+ z->free_elements = (vm_offset_t) base_elt;
+ base_prev->next = elt;
+ prev = base_prev;
+ }
+ }
- unlock_zone(z);
+ if (z->waiting) {
+ z->waiting = FALSE;
+ zone_wakeup(z);
+ }
- n = 0; tail = keep = NULL;
+ unlock_zone(z);
+ }
+ n = 0;
}
}
lock_zone(z);
tail->next = (void *)z->free_elements;
- (void *)z->free_elements = keep;
+ z->free_elements = (vm_offset_t) keep;
unlock_zone(z);
}
* and update the zone size info.
*/
- if (++n >= 50 && keep != NULL) {
+ if (++n >= 50) {
lock_zone(z);
z->cur_size -= size_freed;
size_freed = 0;
- tail->next = (void *)z->free_elements;
- (void *)z->free_elements = keep;
+ if (keep != NULL) {
+ tail->next = (void *)z->free_elements;
+ z->free_elements = (vm_offset_t) keep;
+ }
+
+ if (z->waiting) {
+ z->waiting = FALSE;
+ zone_wakeup(z);
+ }
unlock_zone(z);
* the zone size info.
*/
+ lock_zone(z);
+
if (size_freed > 0 || keep != NULL) {
- lock_zone(z);
z->cur_size -= size_freed;
if (keep != NULL) {
tail->next = (void *)z->free_elements;
- (void *)z->free_elements = keep;
+ z->free_elements = (vm_offset_t) keep;
}
- unlock_zone(z);
}
+
+ z->doing_gc = FALSE;
+ if (z->waiting) {
+ z->waiting = FALSE;
+ zone_wakeup(z);
+ }
+ unlock_zone(z);
}
/*
{
/*
* By default, don't attempt zone GC more frequently
- * than once / 2 seconds.
+ * than once per minute.
*/
if (zone_gc_max_rate == 0)
- zone_gc_max_rate = (2 << SCHED_TICK_SHIFT) + 1;
+ zone_gc_max_rate = (60 << SCHED_TICK_SHIFT) + 1;
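+ /*
+ * Assuming sched_tick advances (1 << SCHED_TICK_SHIFT) times per
+ * second, the value above works out to one minute's worth of ticks.
+ */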
if (zone_gc_allowed &&
((sched_tick > (zone_gc_last_tick + zone_gc_max_rate)) ||
}
}
-#include <mach/kern_return.h>
-#include <mach/machine/vm_types.h>
-#include <mach_debug/zone_info.h>
-#include <kern/host.h>
-#include <vm/vm_map.h>
-#include <vm/vm_kern.h>
-
-#include <mach/mach_host_server.h>
kern_return_t
host_zone_info(
#ifdef ppc
max_zones = num_zones + 4;
#else
- max_zones = num_zones + 2;
+ max_zones = num_zones + 3; /* ATN: count the number below!! */
#endif
z = first_zone;
simple_unlock(&all_zones_lock);
if (max_zones <= *namesCntp) {
/* use in-line memory */
-
+ names_size = *namesCntp * sizeof *names;
names = *namesp;
} else {
- names_size = round_page_32(max_zones * sizeof *names);
+ names_size = round_page(max_zones * sizeof *names);
kr = kmem_alloc_pageable(ipc_kernel_map,
&names_addr, names_size);
if (kr != KERN_SUCCESS)
if (max_zones <= *infoCntp) {
/* use in-line memory */
-
+ info_size = *infoCntp * sizeof *info;
info = *infop;
} else {
- info_size = round_page_32(max_zones * sizeof *info);
+ info_size = round_page(max_zones * sizeof *info);
kr = kmem_alloc_pageable(ipc_kernel_map,
&info_addr, info_size);
if (kr != KERN_SUCCESS) {
zn++;
zi++;
#endif
+
+#ifdef i386
+ strcpy(zn->zn_name, "page_tables");
+ pt_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
+ &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
+ zn++;
+ zi++;
+#endif
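+/*
+ * The "+ 3" (and ppc "+ 4") used for max_zones above must count the
+ * fake zones appended here (e.g. page_tables on i386 and kalloc.large
+ * below); that is what the ATN reminder refers to.
+ */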
+
strcpy(zn->zn_name, "kalloc.large");
kalloc_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size,
&zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible);
if (used != names_size)
bzero((char *) (names_addr + used), names_size - used);
- kr = vm_map_copyin(ipc_kernel_map, names_addr, names_size,
- TRUE, &copy);
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
+ (vm_map_size_t)names_size, TRUE, &copy);
assert(kr == KERN_SUCCESS);
*namesp = (zone_name_t *) copy;
if (used != info_size)
bzero((char *) (info_addr + used), info_size - used);
- kr = vm_map_copyin(ipc_kernel_map, info_addr, info_size,
- TRUE, &copy);
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
+ (vm_map_size_t)info_size, TRUE, &copy);
assert(kr == KERN_SUCCESS);
*infop = (zone_info_t *) copy;
/*ARGSUSED*/
void
db_show_one_zone(
- db_expr_t addr,
- int have_addr,
- db_expr_t count,
- char * modif)
+ db_expr_t addr,
+ int have_addr,
+ __unused db_expr_t count,
+ __unused char * modif)
{
- struct zone *z = (zone_t)addr;
+ struct zone *z = (zone_t)((char *)0 + addr);
if (z == ZONE_NULL || !have_addr){
db_error("No Zone\n");
/*ARGSUSED*/
void
db_show_all_zones(
- db_expr_t addr,
- int have_addr,
- db_expr_t count,
- char * modif)
+ __unused db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ __unused char * modif)
{
zone_t z;
unsigned total = 0;
/* should we care about locks here ? */
#if MACH_KDB
-vm_offset_t
+void *
next_element(
zone_t z,
- vm_offset_t elt)
+ void *prev)
{
+ char *elt = (char *)prev;
+
if (!zone_debug_enabled(z))
return(0);
elt -= ZONE_DEBUG_OFFSET;
- elt = (vm_offset_t) queue_next((queue_t) elt);
+ elt = (char *) queue_next((queue_t) elt);
if ((queue_t) elt == &z->active_zones)
return(0);
elt += ZONE_DEBUG_OFFSET;
return(elt);
}
-vm_offset_t
+void *
first_element(
zone_t z)
{
- vm_offset_t elt;
+ char *elt;
if (!zone_debug_enabled(z))
return(0);
if (queue_empty(&z->active_zones))
return(0);
- elt = (vm_offset_t) queue_first(&z->active_zones);
+ elt = (char *)queue_first(&z->active_zones);
elt += ZONE_DEBUG_OFFSET;
return(elt);
}
zone_t z,
int tail)
{
- vm_offset_t elt;
+ void *elt;
int count = 0;
boolean_t print = (tail != 0);