/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
zone_t kalloc_zone(vm_size_t);
#endif
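+/*
+ * Floor and (32-bit-only) ceiling for the kalloc submap size; both are
+ * applied when kalloc_init() scales the submap to physical memory.
+ */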
+#define KALLOC_MAP_SIZE_MIN (16 * 1024 * 1024)
+#define KALLOC_MAP_SIZE_MAX (128 * 1024 * 1024)
vm_map_t kalloc_map;
-vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;	/* size at or above which kallocs come from the kernel map */
unsigned int kalloc_large_inuse;
vm_size_t kalloc_large_total;
vm_size_t kalloc_large_max;
-vm_size_t kalloc_largest_allocated = 0;
+volatile vm_size_t kalloc_largest_allocated = 0;
+
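+/*
+ * Bounds of the kalloc submap, recorded at init time so that kfree()
+ * can tell from the address alone which map an allocation came from.
+ */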
+vm_offset_t kalloc_map_min;
+vm_offset_t kalloc_map_max;
/*
* All allocations of size less than kalloc_max are rounded to the
{
kern_return_t retval;
vm_offset_t min;
- vm_size_t size;
+ vm_size_t size, kalloc_map_size;
register int i;
+ /*
+	 * Scale the kalloc map size to the physical memory size: at
+	 * sane_size >> 5 it is 1/32 of physical memory, which stays below
+	 * 1/8th of the total zone map size, and is capped at 128 MB on a
+	 * 32-bit kernel.
+ */
+ kalloc_map_size = (vm_size_t)(sane_size >> 5);
+#if !__LP64__
+ if (kalloc_map_size > KALLOC_MAP_SIZE_MAX)
+ kalloc_map_size = KALLOC_MAP_SIZE_MAX;
+#endif /* !__LP64__ */
+ if (kalloc_map_size < KALLOC_MAP_SIZE_MIN)
+ kalloc_map_size = KALLOC_MAP_SIZE_MIN;
+
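+	/*
+	 * For example (illustrative figures, not from this change): 1 GB
+	 * of physical memory yields a 32 MB submap; 256 MB yields 8 MB,
+	 * which the floor above raises to 16 MB; and 8 GB on a 32-bit
+	 * kernel yields 256 MB, which the cap above trims to 128 MB.
+	 */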
retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
- FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);
+ FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT,
+ &kalloc_map);
if (retval != KERN_SUCCESS)
panic("kalloc_init: kmem_suballoc failed");
+ kalloc_map_min = min;
+ kalloc_map_max = min + kalloc_map_size - 1;
+
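+	/*
+	 * A minimal sketch (hypothetical helper, not part of this change)
+	 * of the map-recovery test these bounds make possible:
+	 *
+	 *	static inline vm_map_t
+	 *	kalloc_map_for_addr(vm_offset_t addr)
+	 *	{
+	 *		if (addr >= kalloc_map_min && addr <= kalloc_map_max)
+	 *			return kalloc_map;
+	 *		return kernel_map;
+	 *	}
+	 *
+	 * kfree() performs this same range test inline; VM_FLAGS_PERMANENT
+	 * above keeps the submap's range reserved for the kernel's lifetime,
+	 * so the test cannot be fooled by a recycled address range.
+	 */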
/*
* Ensure that zones up to size 8192 bytes exist.
* This is desirable because messages are allocated
kalloc_max_prerounded = kalloc_max / 2 + 1;
/* allocations strictly larger than 16 times kalloc_max (256 KB) come from the kernel map */
kalloc_kernmap_size = (kalloc_max * 16) + 1;
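+	/*
+	 * Seed the high-water mark at the kernel-map threshold so that the
+	 * double-free check in kfree() never rejects a block whose size is
+	 * legal for the kalloc submap before any larger allocation happens.
+	 */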
+ kalloc_largest_allocated = kalloc_kernmap_size;
/*
* Allocate a zone for each size we are going to handle.
*/
for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
if (size < KALLOC_MINSIZE) {
- k_zone[i] = 0;
+ k_zone[i] = NULL;
continue;
}
if (size == KALLOC_MINSIZE) {
/*
* If size is too large for a zone, then use kmem_alloc.
- * (We use kmem_alloc instead of kmem_alloc_wired so that
+ * (We use kmem_alloc instead of kmem_alloc_kobject so that
* krealloc can use kmem_realloc.)
*/
/* kmem_alloc could block, so return NULL if the caller can't block */
if (!canblock) {
- return(0);
+ return(NULL);
}
- if (size >= kalloc_kernmap_size) {
- alloc_map = kernel_map;
-
- if (size > kalloc_largest_allocated)
- kalloc_largest_allocated = size;
+ if (size >= kalloc_kernmap_size) {
+ volatile vm_offset_t prev_largest;
+ alloc_map = kernel_map;
+ /* Thread-safe version of the workaround for 4740071
+ * (a double FREE())
+ */
+ do {
+ prev_largest = kalloc_largest_allocated;
+		} while ((size > prev_largest) &&
+		    !OSCompareAndSwap((UInt32)prev_largest, (UInt32)size,
+		    (volatile UInt32 *)&kalloc_largest_allocated));
} else
alloc_map = kalloc_map;
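+	/*
+	 * The do/while above is the lock-free "atomic max" idiom. As a
+	 * standalone sketch (hypothetical helper; OSCompareAndSwap() is
+	 * the real primitive from <libkern/OSAtomic.h>):
+	 *
+	 *	static void
+	 *	atomic_store_max(volatile UInt32 *max, UInt32 candidate)
+	 *	{
+	 *		UInt32 old;
+	 *		do {
+	 *			old = *max;
+	 *			if (candidate <= old)
+	 *				return;
+	 *		} while (!OSCompareAndSwap(old, candidate, max));
+	 *	}
+	 *
+	 * If another thread races in between the read and the swap, the
+	 * CAS fails, the loop re-reads, and the stored value only grows.
+	 */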
- if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
- addr = 0;
+ if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS) {
+ if (alloc_map != kernel_map) {
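+			/* the kalloc submap is exhausted; retry in the kernel map */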
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
+ addr = NULL;
+ }
+ else
+ addr = NULL;
+ }
- if (addr) {
+ if (addr != NULL) {
kalloc_large_inuse++;
kalloc_large_total += size;
if (KERN_SUCCESS != kmem_realloc(alloc_map,
(vm_offset_t)*addrp, old_size,
- (vm_offset_t *)&naddr, new_size)) {
+ (vm_offset_t *)&naddr, new_size))
panic("krealloc: kmem_realloc");
- naddr = 0;
- }
simple_lock(lock);
*addrp = (void *) naddr;
return(zget(k_zone[zindex]));
}
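+/* count of kfree() calls suppressed by the double-free workaround below */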
+volatile SInt32 kfree_nop_count = 0;
+
void
kfree(
void *data,
{
register int zindex;
register vm_size_t freesize;
- vm_map_t alloc_map = VM_MAP_NULL;
+ vm_map_t alloc_map = kernel_map;
/* if size was too large for a zone, then use kmem_free */
if (size >= kalloc_max_prerounded) {
- if (size >= kalloc_kernmap_size) {
- alloc_map = kernel_map;
-
- if (size > kalloc_largest_allocated)
+ if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max))
+ alloc_map = kalloc_map;
+ if (size > kalloc_largest_allocated) {
/*
* work around double FREEs of small MALLOCs
* this used to end up being a nop
* since the pointer being freed from an
* alloc backed by the zalloc world could
* never show up in the kalloc_map... however,
* to the above scenario, but it would still be wrong and
* cause serious damage.
*/
+
+ OSAddAtomic(1, &kfree_nop_count);
return;
- } else
- alloc_map = kalloc_map;
+ }
kmem_free(alloc_map, (vm_offset_t)data, size);
kalloc_large_total -= size;
if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
panic("OSMalloc_Tagref(): bad state 0x%08X\n",tag->OSMT_state);
- (void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
+ (void)hw_atomic_add(&tag->OSMT_refcnt, 1);
}
void
if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
panic("OSMalloc_Tagref(): bad state 0x%08X\n",tag->OSMT_state);
- if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
+ if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
simple_lock(&OSMalloc_tag_lock);
(void)remque((queue_entry_t)tag);
if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);
- if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
+ if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
simple_lock(&OSMalloc_tag_lock);
(void)remque((queue_entry_t)tag);
simple_unlock(&OSMalloc_tag_lock);
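+/*
+ * Typical client usage of this tag API, via the public
+ * <libkern/OSMalloc.h> interface (an illustrative sketch; the tag
+ * name is made up):
+ *
+ *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
+ *	void *buf = OSMalloc(1024, tag);
+ *	if (buf != NULL)
+ *		OSFree(buf, 1024, tag);
+ *	OSMalloc_Tagfree(tag);
+ *
+ * OSMalloc() takes a reference on the tag that OSFree() drops; the tag
+ * itself is unqueued and freed only when the last reference goes away.
+ */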
&& (size & ~PAGE_MASK)) {
if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
- panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
+ addr = NULL;
} else
addr = kalloc((vm_size_t)size);
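+	/* the allocation failed: drop the tag reference taken for this request */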
+ if (!addr)
+ OSMalloc_Tagrele(tag);
+
return(addr);
}