*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
((size-1) % sizeof(z->free_elements));
if (alloc == 0)
alloc = PAGE_SIZE;
- alloc = round_page(alloc);
- max = round_page(max);
+ alloc = round_page_32(alloc);
+ max = round_page_32(max);
/*
* We look for an allocation size with least fragmentation
* in the range of 1 - 5 pages. This size will be used unless
* Add at least one page to allocation area.
*/
- space_to_add = round_page(size);
+ space_to_add = round_page_32(size);
if (new_space == 0) {
kern_return_t retval;
void
zone_steal_memory(void)
{
- zdata_size = round_page(128*sizeof(struct zone));
+ zdata_size = round_page_32(128*sizeof(struct zone));
zdata = pmap_steal_memory(zdata_size);
}
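A minimal sketch of the page rounding assumed by the round_page_32() calls above (sketch_round_page_32 and SKETCH_PAGE_SIZE are hypothetical stand-ins for illustration, not the kernel's actual definitions):

    #include <stdint.h>
    #define SKETCH_PAGE_SIZE 4096u                        /* assumed 4 KB pages */
    /* Round a 32-bit byte count up to the next page boundary. */
    #define sketch_round_page_32(x) \
        (((uint32_t)(x) + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1))
    /* e.g. sketch_round_page_32(10000) == 12288, i.e. three whole pages;
     * this is the shape of the zdata_size request that zone_steal_memory()
     * hands to pmap_steal_memory() above. */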
if (nelem <= 0)
return 0;
size = nelem * zone->elem_size;
- size = round_page(size);
+ size = round_page_32(size);
kr = kmem_alloc_wired(kernel_map, &memory, size);
if (kr != KERN_SUCCESS)
return 0;
FALSE, TRUE, &zone_map);
if (retval != KERN_SUCCESS)
panic("zone_init: kmem_suballoc failed");
- zone_max = zone_min + round_page(max_zonemap_size);
+ zone_max = zone_min + round_page_32(max_zonemap_size);
/*
* Setup garbage collection information:
*/
- zone_table_size = atop(zone_max - zone_min) *
+ zone_table_size = atop_32(zone_max - zone_min) *
sizeof(struct zone_page_table_entry);
if (kmem_alloc_wired(zone_map, (vm_offset_t *) &zone_page_table,
zone_table_size) != KERN_SUCCESS)
panic("zone_init");
- zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size);
- zone_pages = atop(zone_max - zone_min);
+ zone_min = (vm_offset_t)zone_page_table + round_page_32(zone_table_size);
+ zone_pages = atop_32(zone_max - zone_min);
zone_map_min_address = zone_min;
zone_map_max_address = zone_max;
simple_lock_init(&zone_page_table_lock, ETAP_MISC_ZONE_PTABLE);
if (vm_pool_low())
alloc_size =
- round_page(zone->elem_size);
+ round_page_32(zone->elem_size);
else
alloc_size = zone->alloc_size;
panic("zone_page_collectable");
#endif
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
+ i = atop_32(addr-zone_map_min_address);
+ j = atop_32((addr+size-1) - zone_map_min_address);
lock_zone_page_table();
for (; i <= j; i++) {
if (zone_page_table[i].in_free_list ==
panic("zone_page_keep");
#endif
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
+ i = atop_32(addr-zone_map_min_address);
+ j = atop_32((addr+size-1) - zone_map_min_address);
lock_zone_page_table();
for (; i <= j; i++) {
zone_page_table[i].in_free_list = 0;
panic("zone_page_in_use");
#endif
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
+ i = atop_32(addr-zone_map_min_address);
+ j = atop_32((addr+size-1) - zone_map_min_address);
lock_zone_page_table();
for (; i <= j; i++) {
if (zone_page_table[i].in_free_list > 0)
panic("zone_page_free");
#endif
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
+ i = atop_32(addr-zone_map_min_address);
+ j = atop_32((addr+size-1) - zone_map_min_address);
lock_zone_page_table();
for (; i <= j; i++) {
assert(zone_page_table[i].in_free_list >= 0);
panic("zone_page_init");
#endif
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
+ i = atop_32(addr-zone_map_min_address);
+ j = atop_32((addr+size-1) - zone_map_min_address);
lock_zone_page_table();
for (; i <= j; i++) {
zone_page_table[i].alloc_count = value;
panic("zone_page_alloc");
#endif
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
+ i = atop_32(addr-zone_map_min_address);
+ j = atop_32((addr+size-1) - zone_map_min_address);
lock_zone_page_table();
for (; i <= j; i++) {
/* Set alloc_count to (ZONE_PAGE_USED + 1) if
panic("zone_page_dealloc");
#endif
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
+ i = atop_32(addr-zone_map_min_address);
+ j = atop_32((addr+size-1) - zone_map_min_address);
lock_zone_page_table();
for (; i <= j; i++) {
zone_page_table[i].alloc_count--;
panic("zone_add_free_page_list");
#endif
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
+ i = atop_32(addr-zone_map_min_address);
+ j = atop_32((addr+size-1) - zone_map_min_address);
lock_zone_page_table();
for (; i <= j; i++) {
if (zone_page_table[i].alloc_count == 0) {
names = *namesp;
} else {
- names_size = round_page(max_zones * sizeof *names);
+ names_size = round_page_32(max_zones * sizeof *names);
kr = kmem_alloc_pageable(ipc_kernel_map,
&names_addr, names_size);
if (kr != KERN_SUCCESS)
info = *infop;
} else {
- info_size = round_page(max_zones * sizeof *info);
+ info_size = round_page_32(max_zones * sizeof *info);
kr = kmem_alloc_pageable(ipc_kernel_map,
&info_addr, info_size);
if (kr != KERN_SUCCESS) {
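A sketch of the page-index computation repeated throughout the hunks above (sketch_atop_32 and SKETCH_PAGE_SHIFT are hypothetical stand-ins for atop_32 and the real page-shift constant):

    #include <stdint.h>
    #define SKETCH_PAGE_SHIFT 12                          /* assumed 4 KB pages */
    /* Convert a 32-bit byte offset within the zone map to a page index. */
    #define sketch_atop_32(x)  ((uint32_t)(x) >> SKETCH_PAGE_SHIFT)
    /* Under that assumption,
     *   i = sketch_atop_32(addr - zone_map_min_address);
     *   j = sketch_atop_32((addr + size - 1) - zone_map_min_address);
     * yields the first and last zone_page_table[] entries overlapped by the
     * byte range [addr, addr+size); e.g. offsets 0x1800..0x2FFF give i == 1
     * and j == 2, so the `for (; i <= j; i++)` loops visit exactly the pages
     * the range touches. */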