git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/vm/vm_kern.c
xnu-2422.100.13.tar.gz
[apple/xnu.git] / osfmk / vm / vm_kern.c
index 68f203028ea8809e08703e08f4ede8ddf7e2abd5..65e48ae7d437ae798eb127317f3cbd468bc0db0c 100644 (file)
@@ -1,31 +1,29 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * This file contains Original Code and/or Modifications of Original Code 
- * as defined in and that are subject to the Apple Public Source License 
- * Version 2.0 (the 'License'). You may not use this file except in 
- * compliance with the License.  The rights granted to you under the 
- * License may not be used to create, or enable the creation or 
- * redistribution of, unlawful or unlicensed copies of an Apple operating 
- * system, or to circumvent, violate, or enable the circumvention or 
- * violation of, any terms of an Apple operating system software license 
- * agreement.
- *
- * Please obtain a copy of the License at 
- * http://www.opensource.apple.com/apsl/ and read it before using this 
- * file.
- *
- * The Original Code and all software distributed under the License are 
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
- * Please see the License for the specific language governing rights and 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
  * limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ * 
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
 #include <vm/cpm.h>
 
 #include <string.h>
+
+#include <libkern/OSDebug.h>
+#include <sys/kdebug.h>
+
 /*
  *     Variables exported by this module.
  */
@@ -86,6 +88,8 @@
 vm_map_t       kernel_map;
 vm_map_t       kernel_pageable_map;
 
+extern boolean_t vm_kernel_ready;
+
 /*
  * Forward declarations for internal functions.
  */
@@ -107,6 +111,8 @@ kmem_alloc_contig(
        vm_offset_t             *addrp,
        vm_size_t               size,
        vm_offset_t             mask,
+       ppnum_t                 max_pnum,
+       ppnum_t                 pnum_mask,
        int                     flags)
 {
        vm_object_t             object;
@@ -118,17 +124,19 @@ kmem_alloc_contig(
        vm_page_t               m, pages;
        kern_return_t           kr;
 
-       if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT))) 
+       if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) 
                return KERN_INVALID_ARGUMENT;
+
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(map));
+       map_mask = (vm_map_offset_t)mask;
        
-       if (size == 0) {
+       /* Check for zero allocation size (either directly or via overflow) */
+       if (map_size == 0) {
                *addrp = 0;
                return KERN_INVALID_ARGUMENT;
        }
 
-       map_size = vm_map_round_page(size);
-       map_mask = (vm_map_offset_t)mask;
-
        /*
         *      Allocate a new object (if necessary) and the reference we
         *      will be donating to the map entry.  We must do this before
@@ -149,17 +157,21 @@ kmem_alloc_contig(
 
        entry->object.vm_object = object;
        entry->offset = offset = (object == kernel_object) ? 
-                       map_addr - VM_MIN_KERNEL_ADDRESS : 0;
+                       map_addr : 0;
 
        /* Take an extra object ref in case the map entry gets deleted */
        vm_object_reference(object);
        vm_map_unlock(map);
 
-       kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);
+       kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);
 
        if (kr != KERN_SUCCESS) {
-               vm_map_remove(map, vm_map_trunc_page(map_addr),
-                             vm_map_round_page(map_addr + map_size), 0);
+               vm_map_remove(map,
+                             vm_map_trunc_page(map_addr,
+                                               VM_MAP_PAGE_MASK(map)),
+                             vm_map_round_page(map_addr + map_size,
+                                               VM_MAP_PAGE_MASK(map)),
+                             0);
                vm_object_deallocate(object);
                *addrp = 0;
                return kr;
@@ -175,16 +187,25 @@ kmem_alloc_contig(
        }
        vm_object_unlock(object);
 
-       if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
-                             vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE)) 
-               != KERN_SUCCESS) {
+       kr = vm_map_wire(map,
+                        vm_map_trunc_page(map_addr,
+                                          VM_MAP_PAGE_MASK(map)),
+                        vm_map_round_page(map_addr + map_size,
+                                          VM_MAP_PAGE_MASK(map)),
+                        VM_PROT_DEFAULT,
+                        FALSE);
+       if (kr != KERN_SUCCESS) {
                if (object == kernel_object) {
                        vm_object_lock(object);
                        vm_object_page_remove(object, offset, offset + map_size);
                        vm_object_unlock(object);
                }
-               vm_map_remove(map, vm_map_trunc_page(map_addr), 
-                             vm_map_round_page(map_addr + map_size), 0);
+               vm_map_remove(map,
+                             vm_map_trunc_page(map_addr,
+                                               VM_MAP_PAGE_MASK(map)), 
+                             vm_map_round_page(map_addr + map_size,
+                                               VM_MAP_PAGE_MASK(map)),
+                             0);
                vm_object_deallocate(object);
                return kr;
        }
@@ -193,7 +214,8 @@ kmem_alloc_contig(
        if (object == kernel_object)
                vm_map_simplify(map, map_addr);
 
-       *addrp = map_addr;
+       *addrp = (vm_offset_t) map_addr;
+       assert((vm_map_offset_t) *addrp == map_addr);
        return KERN_SUCCESS;
 }
 
@@ -224,26 +246,137 @@ kernel_memory_allocate(
 {
        vm_object_t             object;
        vm_object_offset_t      offset;
-       vm_map_entry_t          entry;
-       vm_map_offset_t         map_addr;
+       vm_object_offset_t      pg_offset;
+       vm_map_entry_t          entry = NULL;
+       vm_map_offset_t         map_addr, fill_start;
        vm_map_offset_t         map_mask;
-       vm_map_size_t           map_size;
-       vm_map_size_t           i;
-       kern_return_t           kr;
+       vm_map_size_t           map_size, fill_size;
+       kern_return_t           kr, pe_result;
+       vm_page_t               mem;
+       vm_page_t               guard_page_list = NULL;
+       vm_page_t               wired_page_list = NULL;
+       int                     guard_page_count = 0;
+       int                     wired_page_count = 0;
+       int                     i;
+       int                     vm_alloc_flags;
+       vm_prot_t               kma_prot;
+
+       if (! vm_kernel_ready) {
+               panic("kernel_memory_allocate: VM is not ready");
+       }
+
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(map));
+       map_mask = (vm_map_offset_t) mask;
+       vm_alloc_flags = 0;
 
-       if (size == 0) {
+       /* Check for zero allocation size (either directly or via overflow) */
+       if (map_size == 0) {
                *addrp = 0;
                return KERN_INVALID_ARGUMENT;
        }
-       if (flags & KMA_LOMEM) {
-               if ( !(flags & KMA_NOPAGEWAIT) ) {
-                       *addrp = 0;
-                       return KERN_INVALID_ARGUMENT;
+
+       /*
+        * limit the size of a single extent of wired memory
+        * to try and limit the damage to the system if
+        * too many pages get wired down
+        * limit raised to 2GB with 128GB max physical limit
+        */
+        if (map_size > (1ULL << 31)) {
+                return KERN_RESOURCE_SHORTAGE;
+        }
+
+       /*
+        * Guard pages:
+        *
+        * Guard pages are implemented as ficticious pages.  By placing guard pages
+        * on either end of a stack, they can help detect cases where a thread walks
+        * off either end of its stack.  They are allocated and set up here and attempts
+        * to access those pages are trapped in vm_fault_page().
+        *
+        * The map_size we were passed may include extra space for
+        * guard pages.  If those were requested, then back it out of fill_size
+        * since vm_map_find_space() takes just the actual size not including
+        * guard pages.  Similarly, fill_start indicates where the actual pages
+        * will begin in the range.
+        */
+
+       fill_start = 0;
+       fill_size = map_size;
+
+       if (flags & KMA_GUARD_FIRST) {
+               vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
+               fill_start += PAGE_SIZE_64;
+               fill_size -= PAGE_SIZE_64;
+               if (map_size < fill_start + fill_size) {
+                       /* no space for a guard page */
+                       *addrp = 0;
+                       return KERN_INVALID_ARGUMENT;
+               }
+               guard_page_count++;
+       }
+       if (flags & KMA_GUARD_LAST) {
+               vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
+               fill_size -= PAGE_SIZE_64;
+               if (map_size <= fill_start + fill_size) {
+                       /* no space for a guard page */
+                       *addrp = 0;
+                       return KERN_INVALID_ARGUMENT;
                }
+               guard_page_count++;
        }
+       wired_page_count = (int) (fill_size / PAGE_SIZE_64);
+       assert(wired_page_count * PAGE_SIZE_64 == fill_size);
 
-       map_size = vm_map_round_page(size);
-       map_mask = (vm_map_offset_t) mask;
+       for (i = 0; i < guard_page_count; i++) {
+               for (;;) {
+                       mem = vm_page_grab_guard();
+
+                       if (mem != VM_PAGE_NULL)
+                               break;
+                       if (flags & KMA_NOPAGEWAIT) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       vm_page_more_fictitious();
+               }
+               mem->pageq.next = (queue_entry_t)guard_page_list;
+               guard_page_list = mem;
+       }
+
+       if (! (flags & KMA_VAONLY)) {
+       for (i = 0; i < wired_page_count; i++) {
+               uint64_t        unavailable;
+               
+               for (;;) {
+                       if (flags & KMA_LOMEM)
+                               mem = vm_page_grablo();
+                       else
+                               mem = vm_page_grab();
+
+                       if (mem != VM_PAGE_NULL)
+                               break;
+
+                       if (flags & KMA_NOPAGEWAIT) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
+
+                       if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       VM_PAGE_WAIT();
+               }
+               mem->pageq.next = (queue_entry_t)wired_page_list;
+               wired_page_list = mem;
+       }
+       }
 
        /*
         *      Allocate a new object (if necessary).  We must do this before
@@ -252,72 +385,374 @@ kernel_memory_allocate(
        if ((flags & KMA_KOBJECT) != 0) {
                object = kernel_object;
                vm_object_reference(object);
+       } else if ((flags & KMA_COMPRESSOR) != 0) {
+               object = compressor_object;
+               vm_object_reference(object);
        } else {
                object = vm_object_allocate(map_size);
        }
 
-       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
+       kr = vm_map_find_space(map, &map_addr,
+                              fill_size, map_mask,
+                              vm_alloc_flags, &entry);
        if (KERN_SUCCESS != kr) {
                vm_object_deallocate(object);
-               return kr;
+               goto out;
        }
+
        entry->object.vm_object = object;
-       entry->offset = offset = (object == kernel_object) ? 
-                       map_addr - VM_MIN_KERNEL_ADDRESS : 0;
+       entry->offset = offset = (object == kernel_object || object == compressor_object) ? 
+                       map_addr : 0;
+       
+       if (object != compressor_object)
+               entry->wired_count++;
 
-       vm_object_reference(object);
-       vm_map_unlock(map);
+       if (flags & KMA_PERMANENT)
+               entry->permanent = TRUE;
+
+       if (object != kernel_object && object != compressor_object)
+               vm_object_reference(object);
 
        vm_object_lock(object);
-       for (i = 0; i < map_size; i += PAGE_SIZE) {
-               vm_page_t       mem;
+       vm_map_unlock(map);
 
-               for (;;) {
-                       if (flags & KMA_LOMEM)
-                               mem = vm_page_alloclo(object, offset + i);
-                       else
-                               mem = vm_page_alloc(object, offset + i);
+       pg_offset = 0;
 
-                       if (mem != VM_PAGE_NULL)
-                               break;
+       if (fill_start) {
+               if (guard_page_list == NULL)
+                       panic("kernel_memory_allocate: guard_page_list == NULL");
 
-                       if (flags & KMA_NOPAGEWAIT) {
-                               if (object == kernel_object)
-                                       vm_object_page_remove(object, offset, offset + i);
-                               vm_object_unlock(object);
-                               vm_map_remove(map, map_addr, map_addr + map_size, 0);
-                               vm_object_deallocate(object);
-                               return KERN_RESOURCE_SHORTAGE;
-                       }
+               mem = guard_page_list;
+               guard_page_list = (vm_page_t)mem->pageq.next;
+               mem->pageq.next = NULL;
+
+               vm_page_insert(mem, object, offset + pg_offset);
+
+               mem->busy = FALSE;
+               pg_offset += PAGE_SIZE_64;
+       }
+
+       kma_prot = VM_PROT_READ | VM_PROT_WRITE;
+
+       if (flags & KMA_VAONLY) {
+               pg_offset = fill_start + fill_size;
+       } else {
+       for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
+               if (wired_page_list == NULL)
+                       panic("kernel_memory_allocate: wired_page_list == NULL");
+
+               mem = wired_page_list;
+               wired_page_list = (vm_page_t)mem->pageq.next;
+               mem->pageq.next = NULL;
+               mem->wire_count++;
+
+               vm_page_insert(mem, object, offset + pg_offset);
+
+               mem->busy = FALSE;
+               mem->pmapped = TRUE;
+               mem->wpmapped = TRUE;
+
+               PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
+                                  kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+                                  PMAP_OPTIONS_NOWAIT, pe_result);
+
+               if (pe_result == KERN_RESOURCE_SHORTAGE) {
                        vm_object_unlock(object);
-                       VM_PAGE_WAIT();
+
+                       PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem, 
+                                  kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
+
                        vm_object_lock(object);
                }
+               if (flags & KMA_NOENCRYPT) {
+                       bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
+
+                       pmap_set_noencrypt(mem->phys_page);
+               }
+       }
+       }
+       if ((fill_start + fill_size) < map_size) {
+               if (guard_page_list == NULL)
+                       panic("kernel_memory_allocate: guard_page_list == NULL");
+
+               mem = guard_page_list;
+               guard_page_list = (vm_page_t)mem->pageq.next;
+               mem->pageq.next = NULL;
+
+               vm_page_insert(mem, object, offset + pg_offset);
+
                mem->busy = FALSE;
        }
-       vm_object_unlock(object);
+       if (guard_page_list || wired_page_list)
+               panic("kernel_memory_allocate: non empty list\n");
 
-       if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE)) 
-               != KERN_SUCCESS) {
-               if (object == kernel_object) {
-                       vm_object_lock(object);
-                       vm_object_page_remove(object, offset, offset + map_size);
-                       vm_object_unlock(object);
-               }
-               vm_map_remove(map, map_addr, map_addr + map_size, 0);
-               vm_object_deallocate(object);
-               return (kr);
+       if (! (flags & KMA_VAONLY)) {
+       vm_page_lockspin_queues();
+       vm_page_wire_count += wired_page_count;
+       vm_page_unlock_queues();
        }
-       /* now that the page is wired, we no longer have to fear coalesce */
-       vm_object_deallocate(object);
-       if (object == kernel_object)
+
+       vm_object_unlock(object);
+
+       /*
+        * now that the pages are wired, we no longer have to fear coalesce
+        */
+       if (object == kernel_object || object == compressor_object)
                vm_map_simplify(map, map_addr);
+       else
+               vm_object_deallocate(object);
 
        /*
         *      Return the memory, not zeroed.
         */
        *addrp = CAST_DOWN(vm_offset_t, map_addr);
        return KERN_SUCCESS;
+
+out:
+       if (guard_page_list)
+               vm_page_free_list(guard_page_list, FALSE);
+
+       if (wired_page_list)
+               vm_page_free_list(wired_page_list, FALSE);
+
+       return kr;
+}
+
+/*
+ *	kernel_memory_populate:
+ *
+ *	Back an already-reserved kernel virtual range [addr, addr + size)
+ *	with freshly grabbed physical pages and enter them into the kernel
+ *	pmap.  Only two targets are supported: the compressor object
+ *	(KMA_COMPRESSOR) and the kernel object (KMA_KOBJECT) -- the two
+ *	flags are mutually exclusive (asserted below) and any other target
+ *	panics in the "else" branch.
+ *
+ *	NOTE(review): addr is used directly as the offset into the chosen
+ *	object, so the VA must have been reserved against that object.
+ *	NOTE(review): size is divided by PAGE_SIZE_64 with no rounding, so
+ *	it is presumably expected to be page-aligned -- confirm callers.
+ *
+ *	Returns KERN_SUCCESS, or KERN_RESOURCE_SHORTAGE when pages cannot
+ *	be obtained under KMA_NOPAGEWAIT / KMA_LOMEM constraints (the
+ *	compressor path always waits and cannot fail that way).
+ */
+kern_return_t
+kernel_memory_populate(
+       vm_map_t        map,
+       vm_offset_t     addr,
+       vm_size_t       size,
+       int             flags)
+{
+       vm_object_t             object;
+       vm_object_offset_t      offset, pg_offset;
+       kern_return_t           kr, pe_result;
+       vm_page_t               mem;
+       vm_page_t               page_list = NULL;   /* singly linked through pageq.next */
+       int                     page_count = 0;
+       int                     i;
+
+       page_count = (int) (size / PAGE_SIZE_64);
+
+       assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
+       if (flags & KMA_COMPRESSOR) {
+
+               /*
+                * Compressor path: grab all pages up front, blocking as
+                * long as necessary (no KMA_NOPAGEWAIT handling here).
+                */
+               for (i = 0; i < page_count; i++) {
+                       for (;;) {
+                               mem = vm_page_grab();
+
+                               if (mem != VM_PAGE_NULL)
+                                       break;
+                               
+                               VM_PAGE_WAIT();
+                       }
+                       mem->pageq.next = (queue_entry_t) page_list;
+                       page_list = mem;
+               }
+               offset = addr;
+               object = compressor_object;
+
+               vm_object_lock(object);
+
+               /* Insert and map each page; pages stay marked compressor. */
+               for (pg_offset = 0;
+                    pg_offset < size;
+                    pg_offset += PAGE_SIZE_64) {
+
+                       mem = page_list;
+                       page_list = (vm_page_t) mem->pageq.next;
+                       mem->pageq.next = NULL;
+
+                       vm_page_insert(mem, object, offset + pg_offset);
+                       assert(mem->busy);
+
+                       PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
+                                          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+                                          0, TRUE, PMAP_OPTIONS_NOWAIT, pe_result);
+
+                       if (pe_result == KERN_RESOURCE_SHORTAGE) {
+
+                               /*
+                                * The non-blocking pmap enter could not get
+                                * memory: drop the object lock and retry with
+                                * the blocking variant, then relock.
+                                */
+                               vm_object_unlock(object);
+
+                               PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
+                                          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
+
+                               vm_object_lock(object);
+                       }
+                       mem->busy = FALSE;
+                       mem->pmapped = TRUE;
+                       mem->wpmapped = TRUE;
+                       mem->compressor = TRUE;
+               }
+               vm_object_unlock(object);
+
+               /*
+                * NOTE(review): compressor pages return here without
+                * wire_count++ and without bumping vm_page_wire_count,
+                * unlike the KMA_KOBJECT path below.
+                */
+               return KERN_SUCCESS;
+       }
+
+       /*
+        * KMA_KOBJECT path: grab all pages first so failure cannot leave
+        * a partially populated range; honor KMA_NOPAGEWAIT / KMA_LOMEM.
+        */
+       for (i = 0; i < page_count; i++) {
+               for (;;) {
+                       if (flags & KMA_LOMEM)
+                               mem = vm_page_grablo();
+                       else
+                               mem = vm_page_grab();
+                       
+                       if (mem != VM_PAGE_NULL)
+                               break;
+
+                       if (flags & KMA_NOPAGEWAIT) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       /* low-memory pool exhausted: don't wait forever for it */
+                       if ((flags & KMA_LOMEM) &&
+                           (vm_lopage_needed == TRUE)) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       VM_PAGE_WAIT();
+               }
+               mem->pageq.next = (queue_entry_t) page_list;
+               page_list = mem;
+       }
+       if (flags & KMA_KOBJECT) {
+               offset = addr;
+               object = kernel_object;
+
+               vm_object_lock(object);
+       } else {
+               /*
+                * If it's not the kernel object, we need to:
+                *      lock map;
+                *      lookup entry;
+                *      lock object;
+                *      take reference on object;
+                *      unlock map;
+                */
+               panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
+                     "!KMA_KOBJECT",
+                     map, (uint64_t) addr, (uint64_t) size, flags);
+       }
+
+       /* Wire, insert and map each grabbed page into the kernel object. */
+       for (pg_offset = 0;
+            pg_offset < size;
+            pg_offset += PAGE_SIZE_64) {
+
+               if (page_list == NULL)
+                       panic("kernel_memory_populate: page_list == NULL");
+
+               mem = page_list;
+               page_list = (vm_page_t) mem->pageq.next;
+               mem->pageq.next = NULL;
+
+               mem->wire_count++;
+
+               vm_page_insert(mem, object, offset + pg_offset);
+
+               mem->busy = FALSE;
+               mem->pmapped = TRUE;
+               mem->wpmapped = TRUE;
+
+               PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
+                                  VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+                                  ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+                                  PMAP_OPTIONS_NOWAIT, pe_result);
+
+               if (pe_result == KERN_RESOURCE_SHORTAGE) {
+
+                       /* retry blocking, with the object lock dropped (see above) */
+                       vm_object_unlock(object);
+
+                       PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
+                                  VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+                                  ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
+
+                       vm_object_lock(object);
+               }
+               if (flags & KMA_NOENCRYPT) {
+                       bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
+                       pmap_set_noencrypt(mem->phys_page);
+               }
+       }
+       /* account for the newly wired pages in the global counter */
+       vm_page_lock_queues();
+       vm_page_wire_count += page_count;
+       vm_page_unlock_queues();
+
+       vm_object_unlock(object);
+
+       return KERN_SUCCESS;
+
+out:
+       /* error path: release any pages grabbed but not yet inserted */
+       if (page_list)
+               vm_page_free_list(page_list, FALSE);
+
+       return kr;
+}
+
+
+/*
+ *	kernel_memory_depopulate:
+ *
+ *	Tear down the physical backing of [addr, addr + size): revoke
+ *	access, disconnect each page from the pmap, unlink it from the
+ *	owning object, and free the whole batch at once.  As with
+ *	kernel_memory_populate(), only the compressor object
+ *	(KMA_COMPRESSOR) and the kernel object (KMA_KOBJECT) are
+ *	supported -- mutually exclusive, anything else panics.
+ *
+ *	NOTE(review): no vm_page_wire_count adjustment happens here, and
+ *	the second argument to vm_page_free_list() is TRUE (FALSE on the
+ *	populate error path) -- confirm the freeing/wiring accounting is
+ *	handled inside vm_page_free_list() for this case.
+ */
+void
+kernel_memory_depopulate(
+       vm_map_t        map,
+       vm_offset_t     addr,
+       vm_size_t       size,
+       int             flags)
+{
+       vm_object_t             object;
+       vm_object_offset_t      offset, pg_offset;
+       vm_page_t               mem;
+       vm_page_t               local_freeq = NULL;  /* pages collected for batch free */
+
+       assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
+       if (flags & KMA_COMPRESSOR) {
+               offset = addr;
+               object = compressor_object;
+
+               vm_object_lock(object);
+       } else if (flags & KMA_KOBJECT) {
+               offset = addr;
+               object = kernel_object;
+
+               vm_object_lock(object);
+       } else {
+               offset = 0;
+               object = NULL;
+                /*
+                 * If it's not the kernel object, we need to:
+                 *      lock map;
+                 *      lookup entry;
+                 *      lock object;
+                 *      unlock map;
+                 */
+               panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
+                     "!KMA_KOBJECT",
+                     map, (uint64_t) addr, (uint64_t) size, flags);
+       }
+       /*
+        * Revoke all access to the range up front.  In both supported
+        * branches offset == addr, so this covers exactly [addr, addr+size).
+        */
+       pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);
+
+       for (pg_offset = 0;
+            pg_offset < size;
+            pg_offset += PAGE_SIZE_64) {
+
+               /* every page in the range must still be resident in the object */
+               mem = vm_page_lookup(object, offset + pg_offset);
+
+               assert(mem);
+
+               pmap_disconnect(mem->phys_page);
+
+               /*
+                * Mark busy before unlinking -- presumably required by
+                * vm_page_remove(); the assert below re-checks it survives.
+                */
+               mem->busy = TRUE;
+
+               assert(mem->tabled);
+               vm_page_remove(mem, TRUE);
+               assert(mem->busy);
+
+               /* chain onto the local list instead of freeing one by one */
+               assert(mem->pageq.next == NULL &&
+                      mem->pageq.prev == NULL);
+               mem->pageq.next = (queue_entry_t)local_freeq;
+               local_freeq = mem;
+       }
+       vm_object_unlock(object);
+
+       if (local_freeq)
+               vm_page_free_list(local_freeq, TRUE);
+}
 
 /*
@@ -333,7 +768,9 @@ kmem_alloc(
        vm_offset_t     *addrp,
        vm_size_t       size)
 {
-       return kernel_memory_allocate(map, addrp, size, 0, 0);
+       kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0);
+       TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
+       return kr;
 }
 
 /*
@@ -366,10 +803,13 @@ kmem_realloc(
        vm_page_t               mem;
        kern_return_t           kr;
 
-       oldmapmin = vm_map_trunc_page(oldaddr);
-       oldmapmax = vm_map_round_page(oldaddr + oldsize);
+       oldmapmin = vm_map_trunc_page(oldaddr,
+                                     VM_MAP_PAGE_MASK(map));
+       oldmapmax = vm_map_round_page(oldaddr + oldsize,
+                                     VM_MAP_PAGE_MASK(map));
        oldmapsize = oldmapmax - oldmapmin;
-       newmapsize = vm_map_round_page(newsize);
+       newmapsize = vm_map_round_page(newsize,
+                                      VM_MAP_PAGE_MASK(map));
 
 
        /*
@@ -393,9 +833,9 @@ kmem_realloc(
        /* attempt is made to realloc a kmem_alloc'd area       */
        vm_object_lock(object);
        vm_map_unlock(map);
-       if (object->size != oldmapsize)
+       if (object->vo_size != oldmapsize)
                panic("kmem_realloc");
-       object->size = newmapsize;
+       object->vo_size = newmapsize;
        vm_object_unlock(object);
 
        /* allocate the new pages while expanded portion of the */
@@ -414,12 +854,10 @@ kmem_realloc(
                for(offset = oldmapsize; 
                    offset < newmapsize; offset += PAGE_SIZE) {
                        if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
-                               vm_page_lock_queues();
-                               vm_page_free(mem);
-                               vm_page_unlock_queues();
+                               VM_PAGE_FREE(mem);
                        }
                }
-               object->size = oldmapsize;
+               object->vo_size = oldmapsize;
                vm_object_unlock(object);
                vm_object_deallocate(object);
                return kr;
@@ -440,12 +878,10 @@ kmem_realloc(
                vm_object_lock(object);
                for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
                        if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
-                               vm_page_lock_queues();
-                               vm_page_free(mem);
-                               vm_page_unlock_queues();
+                               VM_PAGE_FREE(mem);
                        }
                }
-               object->size = oldmapsize;
+               object->vo_size = oldmapsize;
                vm_object_unlock(object);
                vm_object_deallocate(object);
                return (kr);
@@ -457,7 +893,7 @@ kmem_realloc(
 }
 
 /*
- *     kmem_alloc_wired:
+ *     kmem_alloc_kobject:
  *
  *     Allocate wired-down memory in the kernel's address map
  *     or a submap.  The memory is not zero-filled.
@@ -468,7 +904,7 @@ kmem_realloc(
  */
 
 kern_return_t
-kmem_alloc_wired(
+kmem_alloc_kobject(
        vm_map_t        map,
        vm_offset_t     *addrp,
        vm_size_t       size)
@@ -479,7 +915,7 @@ kmem_alloc_wired(
 /*
  *     kmem_alloc_aligned:
  *
- *     Like kmem_alloc_wired, except that the memory is aligned.
+ *     Like kmem_alloc_kobject, except that the memory is aligned.
  *     The size should be a power-of-2.
  */
 
@@ -515,7 +951,8 @@ kmem_alloc_pageable(
 #else
        map_addr = vm_map_min(map);
 #endif
-       map_size = vm_map_round_page(size);
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(map));
 
        kr = vm_map_enter(map, &map_addr, map_size,
                          (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
@@ -533,7 +970,7 @@ kmem_alloc_pageable(
  *     kmem_free:
  *
  *     Release a region of kernel virtual memory allocated
- *     with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
+ *     with kmem_alloc, kmem_alloc_kobject, or kmem_alloc_pageable,
  *     and return the physical pages associated with that region.
  */
 
@@ -545,9 +982,23 @@ kmem_free(
 {
        kern_return_t kr;
 
-       kr = vm_map_remove(map, vm_map_trunc_page(addr),
-                               vm_map_round_page(addr + size), 
-                               VM_MAP_REMOVE_KUNWIRE);
+       assert(addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS);
+
+       TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);
+
+       if(size == 0) {
+#if MACH_ASSERT
+               printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n",map,(uint64_t)addr);
+#endif
+               return;
+       }
+
+       kr = vm_map_remove(map,
+                          vm_map_trunc_page(addr,
+                                            VM_MAP_PAGE_MASK(map)),
+                          vm_map_round_page(addr + size,
+                                            VM_MAP_PAGE_MASK(map)), 
+                          VM_MAP_REMOVE_KUNWIRE);
        if (kr != KERN_SUCCESS)
                panic("kmem_free");
 }
@@ -608,8 +1059,10 @@ kmem_remap_pages(
        /*
         *      Mark the pmap region as not pageable.
         */
-       map_start = vm_map_trunc_page(start);
-       map_end = vm_map_round_page(end);
+       map_start = vm_map_trunc_page(start,
+                                     VM_MAP_PAGE_MASK(kernel_map));
+       map_end = vm_map_round_page(end,
+                                   VM_MAP_PAGE_MASK(kernel_map));
 
        pmap_pageable(kernel_pmap, map_start, map_end, FALSE);
 
@@ -627,7 +1080,7 @@ kmem_remap_pages(
            /*
             *  Wire it down (again)
             */
-           vm_page_lock_queues();
+           vm_page_lockspin_queues();
            vm_page_wire(mem);
            vm_page_unlock_queues();
            vm_object_unlock(object);
@@ -644,10 +1097,11 @@ kmem_remap_pages(
             *  Enter it in the kernel pmap.  The page isn't busy,
             *  but this shouldn't be a problem because it is wired.
             */
-           PMAP_ENTER(kernel_pmap, map_start, mem, protection, 
-                       ((unsigned int)(mem->object->wimg_bits))
-                                       & VM_WIMG_MASK,
-                       TRUE);
+
+           mem->pmapped = TRUE;
+           mem->wpmapped = TRUE;
+
+           PMAP_ENTER(kernel_pmap, map_start, mem, protection, VM_PROT_NONE, 0, TRUE);
 
            map_start += PAGE_SIZE;
            offset += PAGE_SIZE;
@@ -683,7 +1137,8 @@ kmem_suballoc(
        vm_map_size_t   map_size;
        kern_return_t   kr;
 
-       map_size = vm_map_round_page(size);
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(parent));
 
        /*
         *      Need reference on submap object because it is internal
@@ -692,8 +1147,10 @@ kmem_suballoc(
         */
        vm_object_reference(vm_submap_object);
 
-       map_addr = (flags & VM_FLAGS_ANYWHERE) ?
-                  vm_map_min(parent) : vm_map_trunc_page(*addr);
+       map_addr = ((flags & VM_FLAGS_ANYWHERE)
+                   ? vm_map_min(parent)
+                   : vm_map_trunc_page(*addr,
+                                       VM_MAP_PAGE_MASK(parent)));
 
        kr = vm_map_enter(parent, &map_addr, map_size,
                          (vm_map_offset_t) 0, flags,
@@ -708,6 +1165,8 @@ kmem_suballoc(
        map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
        if (map == VM_MAP_NULL)
                panic("kmem_suballoc: vm_map_create failed");   /* "can't happen" */
+       /* inherit the parent map's page size */
+       vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));
 
        kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
        if (kr != KERN_SUCCESS) {
@@ -738,39 +1197,52 @@ kmem_init(
        vm_map_offset_t map_start;
        vm_map_offset_t map_end;
 
-       map_start = vm_map_trunc_page(start);
-       map_end = vm_map_round_page(end);
-
-       kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_ADDRESS,
-                                  map_end, FALSE);
+       map_start = vm_map_trunc_page(start,
+                                     VM_MAP_PAGE_MASK(kernel_map));
+       map_end = vm_map_round_page(end,
+                                   VM_MAP_PAGE_MASK(kernel_map));
 
+       kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+                           map_end, FALSE);
        /*
         *      Reserve virtual memory allocated up to this time.
         */
-
-       if (start != VM_MIN_KERNEL_ADDRESS) {
+       if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
                vm_map_offset_t map_addr;
+               kern_return_t kr;
+               map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
+               kr = vm_map_enter(kernel_map,
+                       &map_addr, 
+                       (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+                       (vm_map_offset_t) 0,
+                       VM_FLAGS_FIXED | VM_FLAGS_NO_PMAP_CHECK,
+                       VM_OBJECT_NULL, 
+                       (vm_object_offset_t) 0, FALSE,
+                       VM_PROT_NONE, VM_PROT_NONE,
+                       VM_INHERIT_DEFAULT);
+               
+               if (kr != KERN_SUCCESS) {
+                       panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
+                             (uint64_t) start, (uint64_t) end,
+                             (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+                             (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+                             kr);
+               }       
+       }
 
-               map_addr = VM_MIN_KERNEL_ADDRESS;
-               (void) vm_map_enter(kernel_map,
-                                   &map_addr, 
-                                   (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
-                                   (vm_map_offset_t) 0,
-                                   VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
-                                   VM_OBJECT_NULL, 
-                                   (vm_object_offset_t) 0, FALSE,
-                                   VM_PROT_DEFAULT, VM_PROT_ALL,
-                                   VM_INHERIT_DEFAULT);
-       }
-
-        /*
-         * Account for kernel memory (text, data, bss, vm shenanigans).
-         * This may include inaccessible "holes" as determined by what
-         * the machine-dependent init code includes in max_mem.
-         */
-        vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
-                                                + vm_page_active_count
-                                                + vm_page_inactive_count));
+       /*
+        * Set the default global user wire limit which limits the amount of
+        * memory that can be locked via mlock().  We set this to the total
+        * amount of memory that are potentially usable by a user app (max_mem)
+        * minus a certain amount.  This can be overridden via a sysctl.
+        */
+       vm_global_no_user_wire_amount = MIN(max_mem*20/100,
+                                           VM_NOT_USER_WIREABLE);
+       vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount;
+       
+       /* the default per user limit is the same as the global limit */
+       vm_user_wire_limit = vm_global_user_wire_limit;
 }
 
 
@@ -887,7 +1359,7 @@ vm_conflict_check(
                obj = entry->object.vm_object;
                obj_off = (off - entry->vme_start) + entry->offset;
                while(obj->shadow) {
-                       obj_off += obj->shadow_offset;
+                       obj_off += obj->vo_shadow_offset;
                        obj = obj->shadow;
                }
                if((obj->pager_created) && (obj->pager == pager)) {