X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/a3d08fcd5120d2aa8303b6349ca8b14e3f284af3..8ad349bb6ed4a0be06e34c92be0d98b92e078db4:/osfmk/kern/zalloc.c

diff --git a/osfmk/kern/zalloc.c b/osfmk/kern/zalloc.c
index 6f6966c3b..f1d703433 100644
--- a/osfmk/kern/zalloc.c
+++ b/osfmk/kern/zalloc.c
@@ -1,23 +1,31 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the
+ * License may not be used to create, or enable the creation or
+ * redistribution of, unlawful or unlicensed copies of an Apple operating
+ * system, or to circumvent, violate, or enable the circumvention or
+ * violation of, any terms of an Apple operating system software license
+ * agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -59,8 +67,17 @@
 #include
 #include
 #include
-#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
 #include
+#include
 #include
 #include
 #include
@@ -68,10 +85,20 @@
 #include
 #include
 #include
-#include
+#include
+
+#include
+#include
 #include
+#include
+
 #include
+#if defined(__ppc__)
+/* for fake zone stat routines */
+#include
+#include
+#endif
 
 #if	MACH_ASSERT
 
 /* Detect use of zone elt after freeing it by two methods:
@@ -83,12 +110,12 @@
 
 #if defined(__alpha)
 
 #define is_kernel_data_addr(a)					\
-	(!(a) || IS_SYS_VA(a) && !((a) & (sizeof(long)-1)))
+	(!(a) || (IS_SYS_VA(a) && !((a) & (sizeof(long)-1))))
 
 #else /* !defined(__alpha) */
 
 #define is_kernel_data_addr(a)					\
-	(!(a) || (a) >= VM_MIN_KERNEL_ADDRESS && !((a) & 0x3))
+	(!(a) || ((a) >= VM_MIN_KERNEL_ADDRESS && !((a) & 0x3)))
 
 #endif /* defined(__alpha) */
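The is_kernel_data_addr() change above only adds parentheses: && already binds
tighter than ||, so the expression grouped the same way before the edit, and the
new form just documents the intent (and quiets precedence warnings). A minimal
standalone check of that equivalence (names here are illustrative, not from the
patch):

	#include <assert.h>
	#include <stdio.h>

	/* && binds tighter than ||, so the two macro bodies are equivalent;
	   the parenthesized form only makes the grouping explicit. */
	#define CHECK_IMPLICIT(a, b, c)  ((a) || (b) && (c))
	#define CHECK_EXPLICIT(a, b, c)  ((a) || ((b) && (c)))

	int main(void)
	{
		for (int a = 0; a <= 1; a++)
			for (int b = 0; b <= 1; b++)
				for (int c = 0; c <= 1; c++)
					assert(CHECK_IMPLICIT(a, b, c) ==
					       CHECK_EXPLICIT(a, b, c));
		printf("same truth table\n");
		return 0;
	}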
@@ -101,7 +128,7 @@ boolean_t zfree_clear = FALSE;
 #define ADD_TO_ZONE(zone, element)				\
 MACRO_BEGIN							\
 	if (zfree_clear)					\
-	{   int i;						\
+	{   unsigned int i;					\
 	    for (i=1;						\
 		 i < zone->elem_size/sizeof(vm_offset_t) - 1;	\
 		 i++)						\
@@ -211,26 +238,26 @@ vm_size_t	zdata_size;
 
 #define lock_zone(zone)					\
 MACRO_BEGIN						\
-	simple_lock(&(zone)->lock);			\
+	mutex_lock(&(zone)->lock);			\
 MACRO_END
 
 #define unlock_zone(zone)				\
 MACRO_BEGIN						\
-	simple_unlock(&(zone)->lock);			\
+	mutex_unlock(&(zone)->lock);			\
 MACRO_END
 
 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
 
 #define zone_sleep(zone)				\
-	thread_sleep_simple_lock((event_t)(zone),	\
+	thread_sleep_mutex((event_t)(zone),		\
 				&(zone)->lock,		\
 				THREAD_UNINT)
 
 #define lock_zone_init(zone)				\
 MACRO_BEGIN						\
-	simple_lock_init(&zone->lock, ETAP_MISC_ZONE);	\
+	mutex_init(&zone->lock, 0);			\
 MACRO_END
 
-#define lock_try_zone(zone)	simple_lock_try(&zone->lock)
+#define lock_try_zone(zone)	mutex_try(&zone->lock)
 
 kern_return_t		zget_space(
 				vm_offset_t size,
@@ -247,7 +274,7 @@ vm_size_t	zalloc_wasted_space;
 struct zone_page_table_entry *	zone_page_table;
 vm_offset_t			zone_map_min_address;
 vm_offset_t			zone_map_max_address;
-integer_t			zone_pages;
+unsigned int			zone_pages;
 
 /*
  *	Exclude more than one concurrent garbage collection
@@ -269,7 +296,7 @@ decl_mutex_data(,	zone_gc_lock)
 decl_simple_lock_data(,	all_zones_lock)
 zone_t			first_zone;
 zone_t			*last_zone;
-int			num_zones;
+unsigned int		num_zones;
 
 boolean_t zone_gc_allowed = TRUE;
 boolean_t zone_gc_forced = FALSE;
@@ -287,7 +314,7 @@ zinit(
 	vm_size_t	size,		/* the size of an element */
 	vm_size_t	max,		/* maximum memory to use */
 	vm_size_t	alloc,		/* allocation size */
-	char		*name)		/* a name for the zone */
+	const char	*name)		/* a name for the zone */
 {
 	zone_t		z;
@@ -309,18 +336,29 @@ zinit(
 		 ((size-1) % sizeof(z->free_elements));
 	if (alloc == 0)
 		alloc = PAGE_SIZE;
-	alloc = round_page_32(alloc);
-	max = round_page_32(max);
+	alloc = round_page(alloc);
+	max = round_page(max);
 	/*
-	 * We look for an allocation size with least fragmentation
-	 * in the range of 1 - 5 pages.  This size will be used unless
+	 * we look for an allocation size with less than 1% waste
+	 * up to 5 pages in size...
+	 * otherwise, we look for an allocation size with least fragmentation
+	 * in the range of 1 - 5 pages
+	 * This size will be used unless
 	 * the user suggestion is larger AND has less fragmentation
 	 */
 	{	vm_size_t best, waste; unsigned int i;
 		best  = PAGE_SIZE;
 		waste = best % size;
-		for (i = 2; i <= 5; i++){	vm_size_t tsize, twaste;
-			tsize = i * PAGE_SIZE;
+
+		for (i = 1; i <= 5; i++) {
+			vm_size_t tsize, twaste;
+
+			tsize = i * PAGE_SIZE;
+
+			if ((tsize % size) < (tsize / 100)) {
+				alloc = tsize;
+				goto use_this_allocation;
+			}
 			twaste = tsize % size;
 			if (twaste < waste)
 				best = tsize, waste = twaste;
@@ -328,6 +366,7 @@ zinit(
 		if (alloc <= best || (alloc % size >= waste))
 			alloc = best;
 	}
+use_this_allocation:
 	if (max && (max < alloc))
 		max = alloc;
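The reworked sizing loop in zinit() now takes the smallest allocation block of
1-5 pages whose trailing waste is under 1%, and only falls back to the old
least-fragmentation choice when no size clears that bar. A minimal user-space
sketch of the same heuristic (PAGE_SIZE value and the example element size are
hypothetical inputs, not taken from the patch):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* Mirror of zinit()'s selection: return the first block size (1-5
	   pages) wasting under 1%; otherwise keep the least-waste size. */
	static unsigned long pick_alloc_size(unsigned long elem_size)
	{
		unsigned long best = PAGE_SIZE;
		unsigned long waste = best % elem_size;

		for (unsigned int i = 1; i <= 5; i++) {
			unsigned long tsize = i * PAGE_SIZE;

			if ((tsize % elem_size) < (tsize / 100))
				return tsize;	/* under 1% waste: done */
			if ((tsize % elem_size) < waste) {
				best = tsize;
				waste = tsize % elem_size;
			}
		}
		return best;
	}

	int main(void)
	{
		/* e.g. 824-byte elements: 3 pages hold 14 of them with 752
		   bytes (~6%) wasted; no size gets under 1%, so the loop
		   falls back to the least-waste candidate. */
		printf("%lu\n", pick_alloc_size(824));
		return 0;
	}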
@@ -374,10 +413,11 @@ zinit(
 void
 zcram(
 	register zone_t		zone,
-	vm_offset_t		newmem,
+	void			*newaddr,
 	vm_size_t		size)
 {
 	register vm_size_t	elem_size;
+	vm_offset_t		newmem = (vm_offset_t) newaddr;
 
 	/* Basic sanity checks */
 	assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
@@ -410,7 +450,7 @@ zget_space(
 	vm_offset_t *result)
 {
 	vm_offset_t new_space = 0;
-	vm_size_t space_to_add;
+	vm_size_t space_to_add = 0;
 
 	simple_lock(&zget_space_lock);
 	while ((zalloc_next_space + size) > zalloc_end_of_space) {
 		/*
 		 * Add at least one page to allocation area.
 		 */
 
-		space_to_add = round_page_32(size);
+		space_to_add = round_page(size);
 
 		if (new_space == 0) {
 			kern_return_t retval;
@@ -487,8 +527,8 @@ zget_space(
 void
 zone_steal_memory(void)
 {
-	zdata_size = round_page_32(128*sizeof(struct zone));
-	zdata = pmap_steal_memory(zdata_size);
+	zdata_size = round_page(128*sizeof(struct zone));
+	zdata = (vm_offset_t)((char *)pmap_steal_memory(zdata_size) - (char *)0);
 }
 
@@ -513,13 +553,13 @@ zfill(
 	if (nelem <= 0)
 		return 0;
 	size = nelem * zone->elem_size;
-	size = round_page_32(size);
+	size = round_page(size);
 	kr = kmem_alloc_wired(kernel_map, &memory, size);
 	if (kr != KERN_SUCCESS)
 		return 0;
 
 	zone_change(zone, Z_FOREIGN, TRUE);
-	zcram(zone, memory, size);
+	zcram(zone, (void *)memory, size);
 	nalloc = size / zone->elem_size;
 	assert(nalloc >= nelem);
@@ -537,13 +577,13 @@ zone_bootstrap(void)
 	vm_size_t zone_zone_size;
 	vm_offset_t zone_zone_space;
 
-	simple_lock_init(&all_zones_lock, ETAP_MISC_ZONE_ALL);
+	simple_lock_init(&all_zones_lock, 0);
 
 	first_zone = ZONE_NULL;
 	last_zone = &first_zone;
 	num_zones = 0;
 
-	simple_lock_init(&zget_space_lock, ETAP_MISC_ZONE_GET);
+	simple_lock_init(&zget_space_lock, 0);
 	zalloc_next_space = zdata;
 	zalloc_end_of_space = zdata + zdata_size;
 	zalloc_wasted_space = 0;
@@ -555,7 +595,7 @@ zone_bootstrap(void)
 	zone_change(zone_zone, Z_COLLECT, FALSE);
 	zone_zone_size = zalloc_end_of_space - zalloc_next_space;
 	zget_space(zone_zone_size, &zone_zone_space);
-	zcram(zone_zone, zone_zone_space, zone_zone_size);
+	zcram(zone_zone, (void *)zone_zone_space, zone_zone_size);
 }
 
 void
@@ -568,10 +608,11 @@ zone_init(
 	vm_size_t zone_table_size;
 
 	retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
-			       FALSE, TRUE, &zone_map);
+			       FALSE, VM_FLAGS_ANYWHERE, &zone_map);
+
 	if (retval != KERN_SUCCESS)
 		panic("zone_init: kmem_suballoc failed");
-	zone_max = zone_min + round_page_32(max_zonemap_size);
+	zone_max = zone_min + round_page(max_zonemap_size);
 	/*
 	 * Setup garbage collection information:
 	 */
@@ -580,11 +621,11 @@ zone_init(
 	if (kmem_alloc_wired(zone_map, (vm_offset_t *) &zone_page_table,
 			     zone_table_size) != KERN_SUCCESS)
 		panic("zone_init");
-	zone_min = (vm_offset_t)zone_page_table + round_page_32(zone_table_size);
+	zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size);
 	zone_pages = atop_32(zone_max - zone_min);
 	zone_map_min_address = zone_min;
 	zone_map_max_address = zone_max;
-	mutex_init(&zone_gc_lock, ETAP_NO_TRACE);
+	mutex_init(&zone_gc_lock, 0);
 	zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
 }
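Most of the mechanical churn above is round_page_32() giving way to the
machine-independent round_page(). Both round a byte count up to the next page
boundary; a minimal sketch of that rounding (the macro shape is assumed for
illustration, not quoted from the patch):

	#include <assert.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (PAGE_SIZE - 1)

	/* Round up to the next page boundary; aligned sizes pass through. */
	#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)

	int main(void)
	{
		assert(round_page(1)    == 4096);
		assert(round_page(4096) == 4096);
		assert(round_page(4097) == 8192);
		return 0;
	}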
@@ -592,7 +633,7 @@ zone_init(
 /*
  *	zalloc returns an element from the specified zone.
  */
-vm_offset_t
+void *
 zalloc_canblock(
 	register zone_t	zone,
 	boolean_t canblock)
@@ -601,7 +642,6 @@ zalloc_canblock(
 	kern_return_t retval;
 
 	assert(zone != ZONE_NULL);
-	check_simple_locks();
 
 	lock_zone(zone);
@@ -660,7 +700,7 @@ zalloc_canblock(
 
 				if (vm_pool_low() || retry == TRUE)
 					alloc_size =
-						round_page_32(zone->elem_size);
+						round_page(zone->elem_size);
 				else
 					alloc_size = zone->alloc_size;
@@ -670,13 +710,13 @@ zalloc_canblock(
 				if (retval == KERN_SUCCESS) {
 					zone_page_init(space, alloc_size,
 						ZONE_PAGE_USED);
-					zcram(zone, space, alloc_size);
+					zcram(zone, (void *)space, alloc_size);
 					break;
 				} else if (retval != KERN_RESOURCE_SHORTAGE) {
 					/* would like to cause a zone_gc() */
 					if (retry == TRUE)
-						panic("zalloc");
+						panic("zalloc: \"%s\" (%d elements) retry fail %d", zone->zone_name, zone->count, retval);
 					retry = TRUE;
 				} else {
 					break;
@@ -720,7 +760,7 @@ zalloc_canblock(
 				if (zone_debug_enabled(zone))
 					space += ZONE_DEBUG_OFFSET;
 #endif
-				return(space);
+				return((void *)space);
 			}
 			if (retval == KERN_RESOURCE_SHORTAGE) {
 				unlock_zone(zone);
@@ -728,7 +768,7 @@ zalloc_canblock(
 				VM_PAGE_WAIT();
 				lock_zone(zone);
 			} else {
-				panic("zalloc");
+				panic("zalloc: \"%s\" (%d elements) zget_space returned %d", zone->zone_name, zone->count, retval);
 			}
 		}
 	}
@@ -753,18 +793,18 @@ zalloc_canblock(
 
 	unlock_zone(zone);
 
-	return(addr);
+	return((void *)addr);
 }
 
 
-vm_offset_t
+void *
 zalloc(
 	register zone_t zone)
 {
 	return( zalloc_canblock(zone, TRUE) );
 }
 
-vm_offset_t
+void *
 zalloc_noblock(
 	register zone_t zone)
 {
@@ -773,10 +813,10 @@ zalloc_noblock(
 
 void
 zalloc_async(
-	thread_call_param_t	p0,
-	thread_call_param_t	p1)
+	thread_call_param_t	p0,
+	__unused thread_call_param_t p1)
 {
-	vm_offset_t	elt;
+	void *elt;
 
 	elt = zalloc_canblock((zone_t)p0, TRUE);
 	zfree((zone_t)p0, elt);
@@ -793,7 +833,7 @@ zalloc_async(
  *	This form should be used when you can not block (like when
  *	processing an interrupt).
  */
-vm_offset_t
+void *
 zget(
 	register zone_t	zone)
 {
@@ -802,7 +842,7 @@ zget(
 	assert( zone != ZONE_NULL );
 
 	if (!lock_try_zone(zone))
-		return ((vm_offset_t)0);
+		return NULL;
 
 	REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
 #if ZONE_DEBUG
@@ -813,7 +853,7 @@ zget(
 #endif	/* ZONE_DEBUG */
 	unlock_zone(zone);
 
-	return(addr);
+	return((void *) addr);
 }
 
 /* Keep this FALSE by default.  Large memory machine run orders of magnitude
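With the allocator entry points now returning void *, zget() reports failure as
NULL, whether the zone's try-lock is contended or no element is immediately
available; per the comment above, it is the form for contexts that cannot
block. A hypothetical kernel-side caller (the element type and zone handle are
illustrative, not from the patch; assumes XNU's kern/zalloc.h declarations):

	#include <kern/zalloc.h>	/* zget(), zone_t (XNU kernel build) */

	/* Illustrative element type and zone handle. */
	struct my_elt { int state; };
	extern zone_t my_elt_zone;

	struct my_elt *
	my_elt_grab_at_interrupt_level(void)
	{
		/* zget() never blocks: it try-locks the zone and returns
		 * NULL if the lock is held or the free list is empty. */
		struct my_elt *e = (struct my_elt *)zget(my_elt_zone);

		if (e != NULL)
			e->state = 0;
		return e;	/* caller must tolerate NULL */
	}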
@@ -826,8 +866,9 @@ static vm_offset_t zone_last_bogus_elem = 0;
 void
 zfree(
 	register zone_t	zone,
-	vm_offset_t	elem)
+	void		*addr)
 {
+	vm_offset_t	elem = (vm_offset_t) addr;
 
 #if MACH_ASSERT
 	/* Basic sanity checks */
@@ -842,11 +883,10 @@ zfree(
 	    !from_zone_map(elem, zone->elem_size)) {
 #if MACH_ASSERT
 		panic("zfree: non-allocated memory in collectable zone!");
-#else
+#endif
 		zone_last_bogus_zone = zone;
 		zone_last_bogus_elem = elem;
 		return;
-#endif
 	}
 
 	lock_zone(zone);
@@ -965,7 +1005,7 @@ zprealloc(
 		if (kmem_alloc_wired(zone_map, &addr, size) != KERN_SUCCESS)
 			panic("zprealloc");
 		zone_page_init(addr, size, ZONE_PAGE_USED);
-		zcram(zone, addr, size);
+		zcram(zone, (void *)addr, size);
 	}
 }
@@ -1177,9 +1217,13 @@ zone_gc(void)
 
 		/*
 		 * Do a quick feasability check before we scan the zone:
-		 * skip unless there is likelihood of getting 1+ pages back.
+		 * skip unless there is likelihood of getting pages back
+		 * (i.e we need a whole allocation block's worth of free
+		 * elements before we can garbage collect) and
+		 * the zone has more than 10 percent of it's elements free
 		 */
-		if (z->cur_size - z->count * elt_size <= 2 * PAGE_SIZE){
+		if (((z->cur_size - z->count * elt_size) <= (2 * z->alloc_size)) ||
+		    ((z->cur_size - z->count * elt_size) <= (z->cur_size / 10))) {
 			unlock_zone(z);
 			continue;
 		}
@@ -1390,11 +1434,11 @@ consider_zone_gc(void)
 {
 	/*
 	 *	By default, don't attempt zone GC more frequently
-	 *	than once / 2 seconds.
+	 *	than once / 1 minutes.
 	 */
 
 	if (zone_gc_max_rate == 0)
-		zone_gc_max_rate = (2 << SCHED_TICK_SHIFT) + 1;
+		zone_gc_max_rate = (60 << SCHED_TICK_SHIFT) + 1;
 
 	if (zone_gc_allowed &&
 	    ((sched_tick > (zone_gc_last_tick + zone_gc_max_rate)) ||
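The two garbage-collection hunks above make zone_gc() much choosier: instead of
scanning whenever more than two pages' worth of elements are free, a zone is
skipped unless its free space exceeds both two allocation blocks and 10% of its
current size, and consider_zone_gc() stretches the back-off from 2 to 60
scheduler-tick seconds. A standalone sketch of the new feasibility predicate
with made-up numbers (the struct and values are hypothetical, not from the
patch):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the handful of zone fields the check reads. */
	struct zone_stats {
		unsigned long cur_size;   /* bytes currently backing zone */
		unsigned long count;      /* elements currently allocated */
		unsigned long elem_size;  /* bytes per element */
		unsigned long alloc_size; /* bytes grabbed per expansion */
	};

	static bool worth_collecting(const struct zone_stats *z)
	{
		unsigned long free_bytes =
			z->cur_size - z->count * z->elem_size;

		/* need more than two allocation blocks' worth of free
		   elements AND more than 10% of the zone free before a
		   scan is likely to give whole pages back */
		return free_bytes > 2 * z->alloc_size &&
		       free_bytes > z->cur_size / 10;
	}

	int main(void)
	{
		/* 1 MB zone, 4 KB blocks, 128-byte elements, 7000 live:
		   1048576 - 7000*128 = 152576 free, which beats both
		   8192 and 104857, so this zone would be scanned. */
		struct zone_stats z = { 1048576, 7000, 128, 4096 };
		printf("%s\n", worth_collecting(&z) ? "collect" : "skip");
		return 0;
	}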
@@ -1405,14 +1449,6 @@ consider_zone_gc(void)
 	}
 }
 
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
 
 kern_return_t
 host_zone_info(
@@ -1453,10 +1489,10 @@ host_zone_info(
 
 	if (max_zones <= *namesCntp) {
 		/* use in-line memory */
-
+		names_size = *namesCntp * sizeof *names;
 		names = *namesp;
 	} else {
-		names_size = round_page_32(max_zones * sizeof *names);
+		names_size = round_page(max_zones * sizeof *names);
 		kr = kmem_alloc_pageable(ipc_kernel_map,
 					 &names_addr, names_size);
 		if (kr != KERN_SUCCESS)
@@ -1466,10 +1502,10 @@ host_zone_info(
 
 	if (max_zones <= *infoCntp) {
 		/* use in-line memory */
-
+		info_size = *infoCntp * sizeof *info;
 		info = *infop;
 	} else {
-		info_size = round_page_32(max_zones * sizeof *info);
+		info_size = round_page(max_zones * sizeof *info);
 		kr = kmem_alloc_pageable(ipc_kernel_map,
 					 &info_addr, info_size);
 		if (kr != KERN_SUCCESS) {
@@ -1543,8 +1579,8 @@ host_zone_info(
 		if (used != names_size)
 			bzero((char *) (names_addr + used),
 			      names_size - used);
-		kr = vm_map_copyin(ipc_kernel_map, names_addr, names_size,
-				   TRUE, &copy);
+		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
+				   (vm_map_size_t)names_size, TRUE, &copy);
 		assert(kr == KERN_SUCCESS);
 
 		*namesp = (zone_name_t *) copy;
@@ -1560,8 +1596,8 @@ host_zone_info(
 		if (used != info_size)
 			bzero((char *) (info_addr + used),
 			      info_size - used);
-		kr = vm_map_copyin(ipc_kernel_map, info_addr, info_size,
-				   TRUE, &copy);
+		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
+				   (vm_map_size_t)info_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);
 
 		*infop = (zone_info_t *) copy;
@@ -1615,12 +1651,12 @@ db_print_zone(
 /*ARGSUSED*/
 void
 db_show_one_zone(
-	db_expr_t	addr,
-	int		have_addr,
-	db_expr_t	count,
-	char *		modif)
+	db_expr_t	addr,
+	int		have_addr,
+	__unused db_expr_t	count,
+	__unused char *	modif)
 {
-	struct zone *z = (zone_t)addr;
+	struct zone *z = (zone_t)((char *)0 + addr);
 
 	if (z == ZONE_NULL || !have_addr){
 		db_error("No Zone\n");
@@ -1634,10 +1670,10 @@ db_show_one_zone(
 /*ARGSUSED*/
 void
 db_show_all_zones(
-	db_expr_t	addr,
-	int		have_addr,
-	db_expr_t	count,
-	char *		modif)
+	__unused db_expr_t	addr,
+	int		have_addr,
+	db_expr_t	count,
+	__unused char *	modif)
 {
 	zone_t		z;
 	unsigned	total = 0;
@@ -1784,32 +1820,34 @@ db_zone_print_free(
 
 /* should we care about locks here ? */
 
 #if	MACH_KDB
-vm_offset_t
+void *
 next_element(
 	zone_t		z,
-	vm_offset_t	elt)
+	void		*prev)
 {
+	char *elt = (char *)prev;
+
 	if (!zone_debug_enabled(z))
 		return(0);
 	elt -= ZONE_DEBUG_OFFSET;
-	elt = (vm_offset_t) queue_next((queue_t) elt);
+	elt = (char *) queue_next((queue_t) elt);
 	if ((queue_t) elt == &z->active_zones)
 		return(0);
 	elt += ZONE_DEBUG_OFFSET;
 	return(elt);
 }
 
-vm_offset_t
+void *
 first_element(
 	zone_t		z)
 {
-	vm_offset_t	elt;
+	char		*elt;
 
 	if (!zone_debug_enabled(z))
 		return(0);
 	if (queue_empty(&z->active_zones))
 		return(0);
-	elt = (vm_offset_t) queue_first(&z->active_zones);
+	elt = (char *)queue_first(&z->active_zones);
 	elt += ZONE_DEBUG_OFFSET;
 	return(elt);
 }
@@ -1825,7 +1863,7 @@ zone_count(
 	zone_t		z,
 	int		tail)
 {
-	vm_offset_t	elt;
+	void		*elt;
 	int		count = 0;
 	boolean_t	print = (tail != 0);