vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
+vm_size_t kalloc_kernmap_size; /* size of kallocs that can come from kernel map */
unsigned int kalloc_large_inuse;
vm_size_t kalloc_large_total;
vm_size_t kalloc_large_max;
+vm_size_t kalloc_largest_allocated = 0; /* largest size kalloc'd from the kernel map; checked by kfree() */
/*
* All allocations of size less than kalloc_max are rounded to the
* next highest power of 2.  Note that kalloc_max is somewhat
* confusingly named: it represents the first power of two for which
* no zone exists.
* kalloc_max_prerounded is the smallest allocation size, before
* rounding, for which no zone exists.
+ * Also, if the allocation size is more than kalloc_kernmap_size,
+ * then allocate from the kernel map rather than from kalloc_map.
*/
int first_k_zone = -1;
else
kalloc_max = PAGE_SIZE;
kalloc_max_prerounded = kalloc_max / 2 + 1;
+ /* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */
+ kalloc_kernmap_size = (kalloc_max * 16) + 1;
/*
* Allocate a zone for each size we are going to handle.
{
register int zindex;
register vm_size_t allocsize;
+ vm_map_t alloc_map = VM_MAP_NULL;
/*
* If size is too large for a zone, then use kmem_alloc.
if (!canblock) {
return(0);
}
- if (kmem_alloc(kalloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
+
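+ /*
+ * Requests of kalloc_kernmap_size or more are taken from the
+ * kernel_map rather than the 16 MB kalloc_map.  Remember the
+ * largest size handed out this way so kfree() can reject
+ * obviously bogus sizes later.
+ */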
+ if (size >= kalloc_kernmap_size) {
+ alloc_map = kernel_map;
+
+ if (size > kalloc_largest_allocated)
+ kalloc_largest_allocated = size;
+ } else
+ alloc_map = kalloc_map;
+
+ if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
addr = 0;
if (addr) {
register int zindex;
register vm_size_t allocsize;
void *naddr;
+ vm_map_t alloc_map = VM_MAP_NULL;
/* can only be used for increasing allocation size */
/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */
if (old_size >= kalloc_max_prerounded) {
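+ /* pick the map the original block was kmem_alloc'd from */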
+ if (old_size >= kalloc_kernmap_size)
+ alloc_map = kernel_map;
+ else
+ alloc_map = kalloc_map;
+
old_size = round_page(old_size);
new_size = round_page(new_size);
if (new_size > old_size) {
- if (KERN_SUCCESS != kmem_realloc(kalloc_map,
+ if (KERN_SUCCESS != kmem_realloc(alloc_map,
(vm_offset_t)*addrp, old_size,
(vm_offset_t *)&naddr, new_size)) {
panic("krealloc: kmem_realloc");
*addrp = (void *) naddr;
/* kmem_realloc() doesn't free old page range. */
- kmem_free(kalloc_map, (vm_offset_t)*addrp, old_size);
+ kmem_free(alloc_map, (vm_offset_t)*addrp, old_size);
kalloc_large_total += (new_size - old_size);
simple_unlock(lock);
if (new_size >= kalloc_max_prerounded) {
- if (KERN_SUCCESS != kmem_alloc(kalloc_map,
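+ /* route the replacement block by new_size, just as kalloc() would */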
+ if (new_size >= kalloc_kernmap_size)
+ alloc_map = kernel_map;
+ else
+ alloc_map = kalloc_map;
+ if (KERN_SUCCESS != kmem_alloc(alloc_map,
(vm_offset_t *)&naddr, new_size)) {
panic("krealloc: kmem_alloc");
simple_lock(lock);
{
register int zindex;
register vm_size_t freesize;
+ vm_map_t alloc_map = VM_MAP_NULL;
/* if size was too large for a zone, then use kmem_free */
if (size >= kalloc_max_prerounded) {
- kmem_free(kalloc_map, (vm_offset_t)data, size);
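+ /* return the block to whichever map it was originally allocated from */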
+ if (size >= kalloc_kernmap_size) {
+ alloc_map = kernel_map;
+
+ if (size > kalloc_largest_allocated)
+ /*
+ * work around double FREEs of small MALLOCs.
+ * this used to end up being a nop, since a pointer
+ * freed from an alloc backed by the zalloc world
+ * could never show up in the kalloc_map... however,
+ * the kernel_map is a different issue: since the block
+ * was released back into the zalloc pool, a pointer
+ * would have gotten written over the 'size' that
+ * the MALLOC was retaining in the first 4 bytes of
+ * the underlying allocation... that pointer ends up
+ * looking like a really big size on the 2nd FREE and
+ * pushes the kfree into the kernel_map, so we
+ * end up removing a ton of virtual space before we panic.
+ * this check causes us to ignore the kfree for a size
+ * that must be 'bogus'... note that it might not be due
+ * to the above scenario, but it would still be wrong and
+ * cause serious damage.
+ */
+ return;
+ } else
+ alloc_map = kalloc_map;
+ kmem_free(alloc_map, (vm_offset_t)data, size);
kalloc_large_total -= size;
kalloc_large_inuse--;