diff --git a/osfmk/vm/vm_kern.c b/osfmk/vm/vm_kern.c
index d5ed1f659d2e4e6d543d799e65c3798bfc966a3c..8e53cbd13802039bf99dba3315ee828938eebbc6 100644
--- a/osfmk/vm/vm_kern.c
+++ b/osfmk/vm/vm_kern.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <mach/kern_return.h>
 #include <mach/vm_param.h>
 #include <kern/assert.h>
-#include <kern/lock.h>
 #include <kern/thread.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
+#include <vm/vm_compressor.h>
 #include <vm/vm_pageout.h>
 #include <kern/misc_protos.h>
 #include <vm/cpm.h>
 
 #include <string.h>
+
+#include <libkern/OSDebug.h>
+#include <libkern/crypto/sha2.h>
+#include <sys/kdebug.h>
+
+#include <san/kasan.h>
+
 /*
  *     Variables exported by this module.
  */
 vm_map_t       kernel_map;
 vm_map_t       kernel_pageable_map;
 
+extern boolean_t vm_kernel_ready;
+
 /*
  * Forward declarations for internal functions.
  */
 extern kern_return_t kmem_alloc_pages(
-       register vm_object_t            object,
-       register vm_object_offset_t     offset,
-       register vm_object_size_t       size);
-
-extern void kmem_remap_pages(
-       register vm_object_t            object,
-       register vm_object_offset_t     offset,
-       register vm_offset_t            start,
-       register vm_offset_t            end,
-       vm_prot_t                       protection);
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size);
 
 kern_return_t
 kmem_alloc_contig(
@@ -105,7 +107,10 @@ kmem_alloc_contig(
        vm_offset_t             *addrp,
        vm_size_t               size,
        vm_offset_t             mask,
-       int                     flags)
+       ppnum_t                 max_pnum,
+       ppnum_t                 pnum_mask,
+       int                     flags,
+       vm_tag_t                tag)
 {
        vm_object_t             object;
        vm_object_offset_t      offset;
@@ -116,17 +121,21 @@ kmem_alloc_contig(
        vm_page_t               m, pages;
        kern_return_t           kr;
 
-       if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT))) 
+    assert(VM_KERN_MEMORY_NONE != tag);
+
+       if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) 
                return KERN_INVALID_ARGUMENT;
+
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(map));
+       map_mask = (vm_map_offset_t)mask;
        
-       if (size == 0) {
+       /* Check for zero allocation size (either directly or via overflow) */
+       if (map_size == 0) {
                *addrp = 0;
                return KERN_INVALID_ARGUMENT;
        }
 
-       map_size = vm_map_round_page(size);
-       map_mask = (vm_map_offset_t)mask;
-
        /*
         *      Allocate a new object (if necessary) and the reference we
         *      will be donating to the map entry.  We must do this before
@@ -139,25 +148,34 @@ kmem_alloc_contig(
                object = vm_object_allocate(map_size);
        }
 
-       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
+       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0,
+                              VM_MAP_KERNEL_FLAGS_NONE, tag, &entry);
        if (KERN_SUCCESS != kr) {
                vm_object_deallocate(object);
                return kr;
        }
 
-       entry->object.vm_object = object;
-       entry->offset = offset = (object == kernel_object) ? 
-                       map_addr - VM_MIN_KERNEL_ADDRESS : 0;
+       if (object == kernel_object) {
+               offset = map_addr;
+       } else {
+               offset = 0;
+       }
+       VME_OBJECT_SET(entry, object);
+       VME_OFFSET_SET(entry, offset);
 
        /* Take an extra object ref in case the map entry gets deleted */
        vm_object_reference(object);
        vm_map_unlock(map);
 
-       kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);
+       kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);
 
        if (kr != KERN_SUCCESS) {
-               vm_map_remove(map, vm_map_trunc_page(map_addr),
-                             vm_map_round_page(map_addr + map_size), 0);
+               vm_map_remove(map,
+                             vm_map_trunc_page(map_addr,
+                                               VM_MAP_PAGE_MASK(map)),
+                             vm_map_round_page(map_addr + map_size,
+                                               VM_MAP_PAGE_MASK(map)),
+                             VM_MAP_REMOVE_NO_FLAGS);
                vm_object_deallocate(object);
                *addrp = 0;
                return kr;
@@ -167,30 +185,44 @@ kmem_alloc_contig(
        for (i = 0; i < map_size; i += PAGE_SIZE) {
                m = pages;
                pages = NEXT_PAGE(m);
-               m->busy = FALSE;
+               *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
+               m->vmp_busy = FALSE;
                vm_page_insert(m, object, offset + i);
        }
        vm_object_unlock(object);
 
-       if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
-                             vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE)) 
-               != KERN_SUCCESS) {
+       kr = vm_map_wire_kernel(map,
+                        vm_map_trunc_page(map_addr,
+                                          VM_MAP_PAGE_MASK(map)),
+                        vm_map_round_page(map_addr + map_size,
+                                          VM_MAP_PAGE_MASK(map)),
+                        VM_PROT_DEFAULT, tag,
+                        FALSE);
+
+       if (kr != KERN_SUCCESS) {
                if (object == kernel_object) {
                        vm_object_lock(object);
                        vm_object_page_remove(object, offset, offset + map_size);
                        vm_object_unlock(object);
                }
-               vm_map_remove(map, vm_map_trunc_page(map_addr), 
-                             vm_map_round_page(map_addr + map_size), 0);
+               vm_map_remove(map,
+                             vm_map_trunc_page(map_addr,
+                                               VM_MAP_PAGE_MASK(map)), 
+                             vm_map_round_page(map_addr + map_size,
+                                               VM_MAP_PAGE_MASK(map)),
+                             VM_MAP_REMOVE_NO_FLAGS);
                vm_object_deallocate(object);
                return kr;
        }
        vm_object_deallocate(object);
 
-       if (object == kernel_object)
+       if (object == kernel_object) {
                vm_map_simplify(map, map_addr);
+               vm_tag_update_size(tag, map_size);
+       }
+       *addrp = (vm_offset_t) map_addr;
+       assert((vm_map_offset_t) *addrp == map_addr);
 
-       *addrp = map_addr;
        return KERN_SUCCESS;
 }
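
With the new signature, kmem_alloc_contig() threads a physical page ceiling (max_pnum), a physical page-number alignment mask (pnum_mask), and an allocation tag through to cpm_allocate(). A minimal caller sketch, assuming a hypothetical device that can only address the low 4GB; the 64KB size and the VM_KERN_MEMORY_DIAG tag are illustrative:

	vm_offset_t	buf;
	kern_return_t	kr;
	/* highest acceptable physical page: the last page below 4GB */
	ppnum_t		max_pnum = (ppnum_t)(atop(0x100000000ULL) - 1);

	kr = kmem_alloc_contig(kernel_map, &buf, 64 * 1024,
			       0,		/* no extra VA alignment */
			       max_pnum,
			       0,		/* no physical alignment constraint */
			       KMA_LOMEM,	/* prefer the low-memory pool */
			       VM_KERN_MEMORY_DIAG);
	if (kr == KERN_SUCCESS) {
		/* buf is wired and physically contiguous */
		kmem_free(kernel_map, buf, 64 * 1024);
	}
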
 
@@ -205,32 +237,166 @@ kmem_alloc_contig(
  *               KMA_HERE              *addrp is base address, else "anywhere"
  *               KMA_NOPAGEWAIT        don't wait for pages if unavailable
  *               KMA_KOBJECT           use kernel_object
+ *               KMA_LOMEM             support for 32-bit devices in a 64-bit world;
+ *                                     if set and a low-memory pool is available,
+ *                                     grab pages from it... this also implies
+ *                                     KMA_NOPAGEWAIT
  */
 
 kern_return_t
 kernel_memory_allocate(
-       register vm_map_t       map,
-       register vm_offset_t    *addrp,
-       register vm_size_t      size,
-       register vm_offset_t    mask,
-       int                     flags)
+       vm_map_t        map,
+       vm_offset_t     *addrp,
+       vm_size_t       size,
+       vm_offset_t     mask,
+       int                     flags,
+       vm_tag_t                tag)
 {
        vm_object_t             object;
        vm_object_offset_t      offset;
-       vm_map_entry_t          entry;
-       vm_map_offset_t         map_addr;
+       vm_object_offset_t      pg_offset;
+       vm_map_entry_t          entry = NULL;
+       vm_map_offset_t         map_addr, fill_start;
        vm_map_offset_t         map_mask;
-       vm_map_size_t           map_size;
-       vm_map_size_t           i;
-       kern_return_t           kr;
+       vm_map_size_t           map_size, fill_size;
+       kern_return_t           kr, pe_result;
+       vm_page_t               mem;
+       vm_page_t               guard_page_list = NULL;
+       vm_page_t               wired_page_list = NULL;
+       int                     guard_page_count = 0;
+       int                     wired_page_count = 0;
+       int                     page_grab_count = 0;
+       int                     i;
+       int                     vm_alloc_flags;
+       vm_map_kernel_flags_t   vmk_flags;
+       vm_prot_t               kma_prot;
+
+       if (! vm_kernel_ready) {
+               panic("kernel_memory_allocate: VM is not ready");
+       }
+
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(map));
+       map_mask = (vm_map_offset_t) mask;
 
-       if (size == 0) {
+       vm_alloc_flags = 0; //VM_MAKE_TAG(tag);
+       vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+
+       /* Check for zero allocation size (either directly or via overflow) */
+       if (map_size == 0) {
                *addrp = 0;
                return KERN_INVALID_ARGUMENT;
        }
 
-       map_size = vm_map_round_page(size);
-       map_mask = (vm_map_offset_t) mask;
+       /*
+        * Limit the size of a single extent of wired memory to try to
+        * limit the damage to the system if too many pages get wired
+        * down.  The limit is 2GB up to a 128GB physical-memory
+        * ceiling, and scales with installed memory (sane_size / 64)
+        * above that.
+        */
+        if (!(flags & (KMA_VAONLY | KMA_PAGEABLE)) &&
+           map_size > MAX(1ULL<<31, sane_size/64)) {
+                return KERN_RESOURCE_SHORTAGE;
+        }
+
+       /*
+        * Guard pages:
+        *
+        * Guard pages are implemented as fictitious pages.  By placing guard pages
+        * on either end of a stack, they can help detect cases where a thread walks
+        * off either end of its stack.  They are allocated and set up here and attempts
+        * to access those pages are trapped in vm_fault_page().
+        *
+        * The map_size we were passed may include extra space for
+        * guard pages.  If those were requested, then back it out of fill_size
+        * since vm_map_find_space() takes just the actual size not including
+        * guard pages.  Similarly, fill_start indicates where the actual pages
+        * will begin in the range.
+        */
+
+       fill_start = 0;
+       fill_size = map_size;
+
+       if (flags & KMA_GUARD_FIRST) {
+               vmk_flags.vmkf_guard_before = TRUE;
+               fill_start += PAGE_SIZE_64;
+               fill_size -= PAGE_SIZE_64;
+               if (map_size < fill_start + fill_size) {
+                       /* no space for a guard page */
+                       *addrp = 0;
+                       return KERN_INVALID_ARGUMENT;
+               }
+               guard_page_count++;
+       }
+       if (flags & KMA_GUARD_LAST) {
+               vmk_flags.vmkf_guard_after = TRUE;
+               fill_size -= PAGE_SIZE_64;
+               if (map_size <= fill_start + fill_size) {
+                       /* no space for a guard page */
+                       *addrp = 0;
+                       return KERN_INVALID_ARGUMENT;
+               }
+               guard_page_count++;
+       }
+       wired_page_count = (int) (fill_size / PAGE_SIZE_64);
+       assert(wired_page_count * PAGE_SIZE_64 == fill_size);
+
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START, size, 0, 0, 0);
+#endif
+
+       for (i = 0; i < guard_page_count; i++) {
+               for (;;) {
+                       mem = vm_page_grab_guard();
+
+                       if (mem != VM_PAGE_NULL)
+                               break;
+                       if (flags & KMA_NOPAGEWAIT) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       vm_page_more_fictitious();
+               }
+               mem->vmp_snext = guard_page_list;
+               guard_page_list = mem;
+       }
+
+       if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
+       for (i = 0; i < wired_page_count; i++) {
+               uint64_t        unavailable;
+               
+               for (;;) {
+                       if (flags & KMA_LOMEM)
+                               mem = vm_page_grablo();
+                       else
+                               mem = vm_page_grab();
+
+                       if (mem != VM_PAGE_NULL)
+                               break;
+
+                       if (flags & KMA_NOPAGEWAIT) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
+
+                       if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       VM_PAGE_WAIT();
+               }
+               page_grab_count++;
+               if (KMA_ZERO & flags) vm_page_zero_fill(mem);
+               mem->vmp_snext = wired_page_list;
+               wired_page_list = mem;
+       }
+       }
 
        /*
         *      Allocate a new object (if necessary).  We must do this before
@@ -239,66 +405,459 @@ kernel_memory_allocate(
        if ((flags & KMA_KOBJECT) != 0) {
                object = kernel_object;
                vm_object_reference(object);
+       } else if ((flags & KMA_COMPRESSOR) != 0) {
+               object = compressor_object;
+               vm_object_reference(object);
        } else {
                object = vm_object_allocate(map_size);
        }
 
-       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
+       if (flags & KMA_ATOMIC)
+               vmk_flags.vmkf_atomic_entry = TRUE;
+
+       kr = vm_map_find_space(map, &map_addr,
+                              fill_size, map_mask,
+                              vm_alloc_flags, vmk_flags, tag, &entry);
        if (KERN_SUCCESS != kr) {
                vm_object_deallocate(object);
-               return kr;
+               goto out;
        }
 
-       entry->object.vm_object = object;
-       entry->offset = offset = (object == kernel_object) ? 
-                       map_addr - VM_MIN_KERNEL_ADDRESS : 0;
+       if (object == kernel_object || object == compressor_object) {
+               offset = map_addr;
+       } else {
+               offset = 0;
+       }
+       VME_OBJECT_SET(entry, object);
+       VME_OFFSET_SET(entry, offset);
+       
+       if (!(flags & (KMA_COMPRESSOR | KMA_PAGEABLE)))
+               entry->wired_count++;
 
-       vm_object_reference(object);
-       vm_map_unlock(map);
+       if (flags & KMA_PERMANENT)
+               entry->permanent = TRUE;
+
+       if (object != kernel_object && object != compressor_object)
+               vm_object_reference(object);
 
        vm_object_lock(object);
-       for (i = 0; i < map_size; i += PAGE_SIZE) {
-               vm_page_t       mem;
+       vm_map_unlock(map);
 
-               while (VM_PAGE_NULL == 
-                      (mem = vm_page_alloc(object, offset + i))) {
-                       if (flags & KMA_NOPAGEWAIT) {
-                               if (object == kernel_object)
-                                       vm_object_page_remove(object, offset, offset + i);
-                               vm_object_unlock(object);
-                               vm_map_remove(map, map_addr, map_addr + map_size, 0);
-                               vm_object_deallocate(object);
-                               return KERN_RESOURCE_SHORTAGE;
-                       }
+       pg_offset = 0;
+
+       if (fill_start) {
+               if (guard_page_list == NULL)
+                       panic("kernel_memory_allocate: guard_page_list == NULL");
+
+               mem = guard_page_list;
+               guard_page_list = mem->vmp_snext;
+               mem->vmp_snext = NULL;
+
+               vm_page_insert(mem, object, offset + pg_offset);
+
+               mem->vmp_busy = FALSE;
+               pg_offset += PAGE_SIZE_64;
+       }
+
+       kma_prot = VM_PROT_READ | VM_PROT_WRITE;
+
+#if KASAN
+       if (!(flags & KMA_VAONLY)) {
+               /* for VAONLY mappings we notify in populate only */
+               kasan_notify_address(map_addr, size);
+       }
+#endif
+
+       if (flags & (KMA_VAONLY | KMA_PAGEABLE)) {
+               pg_offset = fill_start + fill_size;
+       } else {
+       for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
+               if (wired_page_list == NULL)
+                       panic("kernel_memory_allocate: wired_page_list == NULL");
+
+               mem = wired_page_list;
+               wired_page_list = mem->vmp_snext;
+               mem->vmp_snext = NULL;
+
+               assert(mem->vmp_wire_count == 0);
+               assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
+
+               mem->vmp_q_state = VM_PAGE_IS_WIRED;
+               mem->vmp_wire_count++;
+               if (__improbable(mem->vmp_wire_count == 0)) {
+                       panic("kernel_memory_allocate(%p): wire_count overflow",
+                             mem);
+               }
+
+               vm_page_insert_wired(mem, object, offset + pg_offset, tag);
+
+               mem->vmp_busy = FALSE;
+               mem->vmp_pmapped = TRUE;
+               mem->vmp_wpmapped = TRUE;
+
+               PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
+                                  kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+                                  PMAP_OPTIONS_NOWAIT, pe_result);
+
+               if (pe_result == KERN_RESOURCE_SHORTAGE) {
                        vm_object_unlock(object);
-                       VM_PAGE_WAIT();
+
+                       PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem, 
+                                  kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+                                  pe_result);
+
                        vm_object_lock(object);
                }
-               mem->busy = FALSE;
-       }
-       vm_object_unlock(object);
 
-       if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE)) 
-               != KERN_SUCCESS) {
-               if (object == kernel_object) {
-                       vm_object_lock(object);
-                       vm_object_page_remove(object, offset, offset + map_size);
-                       vm_object_unlock(object);
+               assert(pe_result == KERN_SUCCESS);
+
+               if (flags & KMA_NOENCRYPT) {
+                       bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
+
+                       pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
                }
-               vm_map_remove(map, map_addr, map_addr + map_size, 0);
-               vm_object_deallocate(object);
-               return (kr);
        }
-       /* now that the page is wired, we no longer have to fear coalesce */
-       vm_object_deallocate(object);
-       if (object == kernel_object)
+       if (kernel_object == object) vm_tag_update_size(tag, fill_size);
+       }
+       if ((fill_start + fill_size) < map_size) {
+               if (guard_page_list == NULL)
+                       panic("kernel_memory_allocate: guard_page_list == NULL");
+
+               mem = guard_page_list;
+               guard_page_list = mem->vmp_snext;
+               mem->vmp_snext = NULL;
+
+               vm_page_insert(mem, object, offset + pg_offset);
+
+               mem->vmp_busy = FALSE;
+       }
+       if (guard_page_list || wired_page_list)
+               panic("kernel_memory_allocate: non empty list\n");
+
+       if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
+       vm_page_lockspin_queues();
+       vm_page_wire_count += wired_page_count;
+       vm_page_unlock_queues();
+       }
+
+       vm_object_unlock(object);
+
+       /*
+        * now that the pages are wired, we no longer have to fear coalescing
+        */
+       if (object == kernel_object || object == compressor_object)
                vm_map_simplify(map, map_addr);
+       else
+               vm_object_deallocate(object);
+
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
 
        /*
         *      Return the memory, not zeroed.
         */
        *addrp = CAST_DOWN(vm_offset_t, map_addr);
        return KERN_SUCCESS;
+
+out:
+       if (guard_page_list)
+               vm_page_free_list(guard_page_list, FALSE);
+
+       if (wired_page_list)
+               vm_page_free_list(wired_page_list, FALSE);
+
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
+       return kr;
+}
+
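Putting the guard-page plumbing above together: a caller requesting KMA_GUARD_FIRST and KMA_GUARD_LAST must include the guard pages in size; fill_start/fill_size then describe the interior that actually receives wired pages. A hedged sketch of a stack-style allocation (kernel stacks are allocated along these lines, though the 16KB payload here is illustrative):

	vm_offset_t	stack;
	vm_size_t	payload = 16 * 1024;
	kern_return_t	kr;

	/* size covers the payload plus one guard page at each end */
	kr = kernel_memory_allocate(kernel_map, &stack,
				    payload + 2 * PAGE_SIZE, 0,
				    KMA_KOBJECT | KMA_KSTACK |
				    KMA_GUARD_FIRST | KMA_GUARD_LAST,
				    VM_KERN_MEMORY_STACK);
	if (kr == KERN_SUCCESS) {
		/* usable range: [stack + PAGE_SIZE, stack + PAGE_SIZE + payload) */
	}
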
+kern_return_t
+kernel_memory_populate(
+       vm_map_t        map,
+       vm_offset_t     addr,
+       vm_size_t       size,
+       int             flags,
+       vm_tag_t        tag)
+{
+       vm_object_t             object;
+       vm_object_offset_t      offset, pg_offset;
+       kern_return_t           kr, pe_result;
+       vm_page_t               mem;
+       vm_page_t               page_list = NULL;
+       int                     page_count = 0;
+       int                     page_grab_count = 0;
+       int                     i;
+
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START, size, 0, 0, 0);
+#endif
+
+       page_count = (int) (size / PAGE_SIZE_64);
+
+       assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
+       if (flags & KMA_COMPRESSOR) {
+
+               pg_offset = page_count * PAGE_SIZE_64;
+
+               do {
+                       for (;;) {
+                               mem = vm_page_grab();
+
+                               if (mem != VM_PAGE_NULL)
+                                       break;
+                               
+                               VM_PAGE_WAIT();
+                       }
+                       page_grab_count++;
+                       if (KMA_ZERO & flags) vm_page_zero_fill(mem);
+                       mem->vmp_snext = page_list;
+                       page_list = mem;
+
+                       pg_offset -= PAGE_SIZE_64;
+
+                       kr = pmap_enter_options(kernel_pmap,
+                                                 addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem),
+                                                 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
+                                                 PMAP_OPTIONS_INTERNAL, NULL);
+                       assert(kr == KERN_SUCCESS);
+
+               } while (pg_offset);
+
+               offset = addr;
+               object = compressor_object;
+
+               vm_object_lock(object);
+
+               for (pg_offset = 0;
+                    pg_offset < size;
+                    pg_offset += PAGE_SIZE_64) {
+
+                       mem = page_list;
+                       page_list = mem->vmp_snext;
+                       mem->vmp_snext = NULL;
+
+                       vm_page_insert(mem, object, offset + pg_offset);
+                       assert(mem->vmp_busy);
+
+                       mem->vmp_busy = FALSE;
+                       mem->vmp_pmapped = TRUE;
+                       mem->vmp_wpmapped = TRUE;
+                       mem->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
+               }
+               vm_object_unlock(object);
+
+#if KASAN
+               if (map == compressor_map) {
+                       kasan_notify_address_nopoison(addr, size);
+               } else {
+                       kasan_notify_address(addr, size);
+               }
+#endif
+
+#if DEBUG || DEVELOPMENT
+               VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+               return KERN_SUCCESS;
+       }
+
+       for (i = 0; i < page_count; i++) {
+               for (;;) {
+                       if (flags & KMA_LOMEM)
+                               mem = vm_page_grablo();
+                       else
+                               mem = vm_page_grab();
+                       
+                       if (mem != VM_PAGE_NULL)
+                               break;
+
+                       if (flags & KMA_NOPAGEWAIT) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       if ((flags & KMA_LOMEM) &&
+                           (vm_lopage_needed == TRUE)) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto out;
+                       }
+                       VM_PAGE_WAIT();
+               }
+               page_grab_count++;
+               if (KMA_ZERO & flags) vm_page_zero_fill(mem);
+               mem->vmp_snext = page_list;
+               page_list = mem;
+       }
+       if (flags & KMA_KOBJECT) {
+               offset = addr;
+               object = kernel_object;
+
+               vm_object_lock(object);
+       } else {
+               /*
+                * If it's not the kernel object, we need to:
+                *      lock map;
+                *      lookup entry;
+                *      lock object;
+                *      take reference on object;
+                *      unlock map;
+                */
+               panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
+                     "!KMA_KOBJECT",
+                     map, (uint64_t) addr, (uint64_t) size, flags);
+       }
+
+       for (pg_offset = 0;
+            pg_offset < size;
+            pg_offset += PAGE_SIZE_64) {
+
+               if (page_list == NULL)
+                       panic("kernel_memory_populate: page_list == NULL");
+
+               mem = page_list;
+               page_list = mem->vmp_snext;
+               mem->vmp_snext = NULL;
+
+               assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
+               mem->vmp_q_state = VM_PAGE_IS_WIRED;
+               mem->vmp_wire_count++;
+               if (__improbable(mem->vmp_wire_count == 0)) {
+                       panic("kernel_memory_populate(%p): wire_count overflow", mem);
+               }
+
+               vm_page_insert_wired(mem, object, offset + pg_offset, tag);
+
+               mem->vmp_busy = FALSE;
+               mem->vmp_pmapped = TRUE;
+               mem->vmp_wpmapped = TRUE;
+
+               PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
+                                  VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+                                  ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+                                  PMAP_OPTIONS_NOWAIT, pe_result);
+
+               if (pe_result == KERN_RESOURCE_SHORTAGE) {
+
+                       vm_object_unlock(object);
+
+                       PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
+                                  VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+                                  ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+                                  pe_result);
+
+                       vm_object_lock(object);
+               }
+
+               assert(pe_result == KERN_SUCCESS);
+
+               if (flags & KMA_NOENCRYPT) {
+                       bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
+                       pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
+               }
+       }
+       vm_page_lockspin_queues();
+       vm_page_wire_count += page_count;
+       vm_page_unlock_queues();
+
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
+       if (kernel_object == object) vm_tag_update_size(tag, size);
+
+       vm_object_unlock(object);
+
+#if KASAN
+       if (map == compressor_map) {
+               kasan_notify_address_nopoison(addr, size);
+       } else {
+               kasan_notify_address(addr, size);
+       }
+#endif
+       return KERN_SUCCESS;
+
+out:
+       if (page_list)
+               vm_page_free_list(page_list, FALSE);
+
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
+       return kr;
+}
+
+
+void
+kernel_memory_depopulate(
+       vm_map_t        map,
+       vm_offset_t     addr,
+       vm_size_t       size,
+       int             flags)
+{
+       vm_object_t             object;
+       vm_object_offset_t      offset, pg_offset;
+       vm_page_t               mem;
+       vm_page_t               local_freeq = NULL;
+
+       assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
+       if (flags & KMA_COMPRESSOR) {
+               offset = addr;
+               object = compressor_object;
+
+               vm_object_lock(object);
+       } else if (flags & KMA_KOBJECT) {
+               offset = addr;
+               object = kernel_object;
+               vm_object_lock(object);
+       } else {
+               offset = 0;
+               object = NULL;
+                /*
+                 * If it's not the kernel object, we need to:
+                 *      lock map;
+                 *      lookup entry;
+                 *      lock object;
+                 *      unlock map;
+                 */
+               panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
+                     "!KMA_KOBJECT",
+                     map, (uint64_t) addr, (uint64_t) size, flags);
+       }
+       pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);
+
+       for (pg_offset = 0;
+            pg_offset < size;
+            pg_offset += PAGE_SIZE_64) {
+
+               mem = vm_page_lookup(object, offset + pg_offset);
+
+               assert(mem);
+               
+               if (mem->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR)
+                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
+
+               mem->vmp_busy = TRUE;
+
+               assert(mem->vmp_tabled);
+               vm_page_remove(mem, TRUE);
+               assert(mem->vmp_busy);
+
+               assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
+               assert((mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+                      (mem->vmp_q_state == VM_PAGE_NOT_ON_Q));
+
+               mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
+               mem->vmp_snext = local_freeq;
+               local_freeq = mem;
+       }
+       vm_object_unlock(object);
+
+       if (local_freeq)
+               vm_page_free_list(local_freeq, TRUE);
 }
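
kernel_memory_populate() and kernel_memory_depopulate() are the two halves of the on-demand pattern: reserve kernel virtual space once with KMA_VAONLY, then wire pages behind it and release them as needed while keeping the reservation. A sketch of that round trip, with the size and tag chosen for illustration:

	vm_offset_t	va;
	vm_size_t	sz = 4 * PAGE_SIZE;
	kern_return_t	kr;

	/* reserve VA in the kernel object; no physical pages yet */
	kr = kernel_memory_allocate(kernel_map, &va, sz, 0,
				    KMA_KOBJECT | KMA_VAONLY,
				    VM_KERN_MEMORY_OSFMK);
	assert(kr == KERN_SUCCESS);

	/* back the range with wired pages... */
	kr = kernel_memory_populate(kernel_map, va, sz, KMA_KOBJECT,
				    VM_KERN_MEMORY_OSFMK);
	assert(kr == KERN_SUCCESS);

	/* ...and later return them, keeping the VA reservation intact */
	kernel_memory_depopulate(kernel_map, va, sz, KMA_KOBJECT);
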
 
 /*
@@ -309,12 +868,36 @@ kernel_memory_allocate(
  */
 
 kern_return_t
-kmem_alloc(
+kmem_alloc_external(
        vm_map_t        map,
        vm_offset_t     *addrp,
        vm_size_t       size)
 {
-       return kernel_memory_allocate(map, addrp, size, 0, 0);
+    return (kmem_alloc(map, addrp, size, vm_tag_bt()));
+}
+
+
+kern_return_t
+kmem_alloc(
+       vm_map_t        map,
+       vm_offset_t     *addrp,
+       vm_size_t       size,
+       vm_tag_t        tag)
+{
+       return kmem_alloc_flags(map, addrp, size, tag, 0);
+}
+
+kern_return_t
+kmem_alloc_flags(
+       vm_map_t        map,
+       vm_offset_t     *addrp,
+       vm_size_t       size,
+       vm_tag_t        tag,
+       int             flags)
+{
+       kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, flags, tag);
+       TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
+       return kr;
 }
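
The _external variants exist so allocations arriving from outside the kernel proper (kexts, MIG) are attributed by backtrace via vm_tag_bt(), while in-kernel callers now pass an explicit tag. For example, with an illustrative tag and flags:

	vm_offset_t	addr;
	kern_return_t	kr;

	kr = kmem_alloc_flags(kernel_map, &addr, 8 * PAGE_SIZE,
			      VM_KERN_MEMORY_OSFMK, KMA_ZERO);
	if (kr == KERN_SUCCESS)
		kmem_free(kernel_map, addr, 8 * PAGE_SIZE);
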
 
 /*
@@ -333,7 +916,8 @@ kmem_realloc(
        vm_offset_t             oldaddr,
        vm_size_t               oldsize,
        vm_offset_t             *newaddrp,
-       vm_size_t               newsize)
+       vm_size_t               newsize,
+       vm_tag_t                tag)
 {
        vm_object_t             object;
        vm_object_offset_t      offset;
@@ -347,11 +931,18 @@ kmem_realloc(
        vm_page_t               mem;
        kern_return_t           kr;
 
-       oldmapmin = vm_map_trunc_page(oldaddr);
-       oldmapmax = vm_map_round_page(oldaddr + oldsize);
+       oldmapmin = vm_map_trunc_page(oldaddr,
+                                     VM_MAP_PAGE_MASK(map));
+       oldmapmax = vm_map_round_page(oldaddr + oldsize,
+                                     VM_MAP_PAGE_MASK(map));
        oldmapsize = oldmapmax - oldmapmin;
-       newmapsize = vm_map_round_page(newsize);
-
+       newmapsize = vm_map_round_page(newsize,
+                                      VM_MAP_PAGE_MASK(map));
+       if (newmapsize < newsize) {
+               /* overflow */
+               *newaddrp = 0;
+               return KERN_INVALID_ARGUMENT;
+       }
 
        /*
         *      Find the VM object backing the old region.
@@ -361,7 +952,7 @@ kmem_realloc(
 
        if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
                panic("kmem_realloc");
-       object = oldentry->object.vm_object;
+       object = VME_OBJECT(oldentry);
 
        /*
         *      Increase the size of the object and
@@ -374,9 +965,9 @@ kmem_realloc(
        /* attempt is made to realloc a kmem_alloc'd area       */
        vm_object_lock(object);
        vm_map_unlock(map);
-       if (object->size != oldmapsize)
+       if (object->vo_size != oldmapsize)
                panic("kmem_realloc");
-       object->size = newmapsize;
+       object->vo_size = newmapsize;
        vm_object_unlock(object);
 
        /* allocate the new pages while expanded portion of the */
@@ -389,25 +980,26 @@ kmem_realloc(
         */
 
        kr = vm_map_find_space(map, &newmapaddr, newmapsize,
-                              (vm_map_offset_t) 0, &newentry);
+                              (vm_map_offset_t) 0, 0,
+                              VM_MAP_KERNEL_FLAGS_NONE,
+                              tag,
+                              &newentry);
        if (kr != KERN_SUCCESS) {
                vm_object_lock(object);
                for(offset = oldmapsize; 
                    offset < newmapsize; offset += PAGE_SIZE) {
                        if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
-                               vm_page_lock_queues();
-                               vm_page_free(mem);
-                               vm_page_unlock_queues();
+                               VM_PAGE_FREE(mem);
                        }
                }
-               object->size = oldmapsize;
+               object->vo_size = oldmapsize;
                vm_object_unlock(object);
                vm_object_deallocate(object);
                return kr;
        }
-       newentry->object.vm_object = object;
-       newentry->offset = 0;
-       assert (newentry->wired_count == 0);
+       VME_OBJECT_SET(newentry, object);
+       VME_OFFSET_SET(newentry, 0);
+       assert(newentry->wired_count == 0);
 
        
        /* add an extra reference in case we have someone doing an */
@@ -415,30 +1007,31 @@ kmem_realloc(
        vm_object_reference(object);
        vm_map_unlock(map);
 
-       kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
+       kr = vm_map_wire_kernel(map, newmapaddr, newmapaddr + newmapsize,
+                        VM_PROT_DEFAULT, tag, FALSE);
        if (KERN_SUCCESS != kr) {
-               vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
+               vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, VM_MAP_REMOVE_NO_FLAGS);
                vm_object_lock(object);
                for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
                        if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
-                               vm_page_lock_queues();
-                               vm_page_free(mem);
-                               vm_page_unlock_queues();
+                               VM_PAGE_FREE(mem);
                        }
                }
-               object->size = oldmapsize;
+               object->vo_size = oldmapsize;
                vm_object_unlock(object);
                vm_object_deallocate(object);
                return (kr);
        }
        vm_object_deallocate(object);
 
+       if (kernel_object == object) vm_tag_update_size(tag, newmapsize);
+
        *newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
        return KERN_SUCCESS;
 }
 
 /*
- *     kmem_alloc_wired:
+ *     kmem_alloc_kobject:
  *
  *     Allocate wired-down memory in the kernel's address map
  *     or a submap.  The memory is not zero-filled.
@@ -449,18 +1042,28 @@ kmem_realloc(
  */
 
 kern_return_t
-kmem_alloc_wired(
+kmem_alloc_kobject_external(
        vm_map_t        map,
        vm_offset_t     *addrp,
        vm_size_t       size)
 {
-       return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
+    return (kmem_alloc_kobject(map, addrp, size, vm_tag_bt()));
+}
+
+kern_return_t
+kmem_alloc_kobject(
+       vm_map_t        map,
+       vm_offset_t     *addrp,
+       vm_size_t       size,
+       vm_tag_t        tag)
+{
+       return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT, tag);
 }
 
 /*
  *     kmem_alloc_aligned:
  *
- *     Like kmem_alloc_wired, except that the memory is aligned.
+ *     Like kmem_alloc_kobject, except that the memory is aligned.
  *     The size should be a power-of-2.
  */
 
@@ -468,11 +1071,12 @@ kern_return_t
 kmem_alloc_aligned(
        vm_map_t        map,
        vm_offset_t     *addrp,
-       vm_size_t       size)
+       vm_size_t       size,
+       vm_tag_t        tag)
 {
        if ((size & (size - 1)) != 0)
                panic("kmem_alloc_aligned: size not aligned");
-       return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
+       return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT, tag);
 }
 
 /*
@@ -482,30 +1086,52 @@ kmem_alloc_aligned(
  */
 
 kern_return_t
-kmem_alloc_pageable(
+kmem_alloc_pageable_external(
        vm_map_t        map,
        vm_offset_t     *addrp,
        vm_size_t       size)
+{
+    return (kmem_alloc_pageable(map, addrp, size, vm_tag_bt()));
+}
+
+kern_return_t
+kmem_alloc_pageable(
+       vm_map_t        map,
+       vm_offset_t     *addrp,
+       vm_size_t       size,
+       vm_tag_t        tag)
 {
        vm_map_offset_t map_addr;
        vm_map_size_t   map_size;
        kern_return_t kr;
 
 #ifndef normal
-       map_addr = (vm_map_min(map)) + 0x1000;
+       map_addr = (vm_map_min(map)) + PAGE_SIZE;
 #else
        map_addr = vm_map_min(map);
 #endif
-       map_size = vm_map_round_page(size);
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(map));
+       if (map_size < size) {
+               /* overflow */
+               *addrp = 0;
+               return KERN_INVALID_ARGUMENT;
+       }
 
        kr = vm_map_enter(map, &map_addr, map_size,
-                         (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
+                         (vm_map_offset_t) 0, 
+                         VM_FLAGS_ANYWHERE,
+                         VM_MAP_KERNEL_FLAGS_NONE,
+                         tag,
                          VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
                          VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
 
        if (kr != KERN_SUCCESS)
                return kr;
 
+#if KASAN
+       kasan_notify_address(map_addr, map_size);
+#endif
        *addrp = CAST_DOWN(vm_offset_t, map_addr);
        return KERN_SUCCESS;
 }
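
Unlike the wired allocators above, kmem_alloc_pageable() returns memory that can fault and be paged, so it is meant for pageable submaps such as kernel_pageable_map. A minimal sketch (size and tag illustrative):

	vm_offset_t	addr;
	kern_return_t	kr;

	kr = kmem_alloc_pageable(kernel_pageable_map, &addr, 32 * 1024,
				 VM_KERN_MEMORY_OSFMK);
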
@@ -514,7 +1140,7 @@ kmem_alloc_pageable(
  *     kmem_free:
  *
  *     Release a region of kernel virtual memory allocated
- *     with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
+ *     with kmem_alloc, kmem_alloc_kobject, or kmem_alloc_pageable,
  *     and return the physical pages associated with that region.
  */
 
@@ -526,9 +1152,23 @@ kmem_free(
 {
        kern_return_t kr;
 
-       kr = vm_map_remove(map, vm_map_trunc_page(addr),
-                               vm_map_round_page(addr + size), 
-                               VM_MAP_REMOVE_KUNWIRE);
+       assert(addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS);
+
+       TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);
+
+       if(size == 0) {
+#if MACH_ASSERT
+               printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n",map,(uint64_t)addr);
+#endif
+               return;
+       }
+
+       kr = vm_map_remove(map,
+                          vm_map_trunc_page(addr,
+                                            VM_MAP_PAGE_MASK(map)),
+                          vm_map_round_page(addr + size,
+                                            VM_MAP_PAGE_MASK(map)), 
+                          VM_MAP_REMOVE_KUNWIRE);
        if (kr != KERN_SUCCESS)
                panic("kmem_free");
 }
@@ -539,16 +1179,16 @@ kmem_free(
 
 kern_return_t
 kmem_alloc_pages(
-       register vm_object_t            object,
-       register vm_object_offset_t     offset,
-       register vm_object_size_t       size)
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size)
 {
        vm_object_size_t                alloc_size;
 
        alloc_size = vm_object_round_page(size);
         vm_object_lock(object);
        while (alloc_size) {
-           register vm_page_t  mem;
+           vm_page_t   mem;
 
 
            /*
@@ -560,7 +1200,7 @@ kmem_alloc_pages(
                VM_PAGE_WAIT();
                vm_object_lock(object);
            }
-           mem->busy = FALSE;
+           mem->vmp_busy = FALSE;
 
            alloc_size -= PAGE_SIZE;
            offset += PAGE_SIZE;
@@ -569,72 +1209,6 @@ kmem_alloc_pages(
        return KERN_SUCCESS;
 }
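
kmem_alloc_pages() is the low-level helper that fills a range of an existing object with pages, sleeping in VM_PAGE_WAIT() until memory is available. A sketch of populating a freshly allocated object:

	vm_object_size_t	sz = 4 * PAGE_SIZE;
	vm_object_t		object = vm_object_allocate(sz);

	/* fills offsets [0, sz); always returns KERN_SUCCESS */
	(void) kmem_alloc_pages(object, 0, sz);
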
 
-/*
- *     Remap wired pages in an object into a new region.
- *     The object is assumed to be mapped into the kernel map or
- *     a submap.
- */
-void
-kmem_remap_pages(
-       register vm_object_t            object,
-       register vm_object_offset_t     offset,
-       register vm_offset_t            start,
-       register vm_offset_t            end,
-       vm_prot_t                       protection)
-{
-
-       vm_map_offset_t                 map_start;
-       vm_map_offset_t                 map_end;
-
-       /*
-        *      Mark the pmap region as not pageable.
-        */
-       map_start = vm_map_trunc_page(start);
-       map_end = vm_map_round_page(end);
-
-       pmap_pageable(kernel_pmap, map_start, map_end, FALSE);
-
-       while (map_start < map_end) {
-           register vm_page_t  mem;
-
-           vm_object_lock(object);
-
-           /*
-            *  Find a page
-            */
-           if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
-               panic("kmem_remap_pages");
-
-           /*
-            *  Wire it down (again)
-            */
-           vm_page_lock_queues();
-           vm_page_wire(mem);
-           vm_page_unlock_queues();
-           vm_object_unlock(object);
-
-           /*
-            * ENCRYPTED SWAP:
-            * The page is supposed to be wired now, so it
-            * shouldn't be encrypted at this point.  It can
-            * safely be entered in the page table.
-            */
-           ASSERT_PAGE_DECRYPTED(mem);
-
-           /*
-            *  Enter it in the kernel pmap.  The page isn't busy,
-            *  but this shouldn't be a problem because it is wired.
-            */
-           PMAP_ENTER(kernel_pmap, map_start, mem, protection, 
-                       ((unsigned int)(mem->object->wimg_bits))
-                                       & VM_WIMG_MASK,
-                       TRUE);
-
-           map_start += PAGE_SIZE;
-           offset += PAGE_SIZE;
-       }
-}
-
 /*
  *     kmem_suballoc:
  *
@@ -657,6 +1231,8 @@ kmem_suballoc(
        vm_size_t       size,
        boolean_t       pageable,
        int             flags,
+       vm_map_kernel_flags_t vmk_flags,
+       vm_tag_t    tag,
        vm_map_t        *new_map)
 {
        vm_map_t        map;
@@ -664,7 +1240,13 @@ kmem_suballoc(
        vm_map_size_t   map_size;
        kern_return_t   kr;
 
-       map_size = vm_map_round_page(size);
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(parent));
+       if (map_size < size) {
+               /* overflow */
+               *addr = 0;
+               return KERN_INVALID_ARGUMENT;
+       }
 
        /*
         *      Need reference on submap object because it is internal
@@ -673,11 +1255,13 @@ kmem_suballoc(
         */
        vm_object_reference(vm_submap_object);
 
-       map_addr = (flags & VM_FLAGS_ANYWHERE) ?
-                  vm_map_min(parent) : vm_map_trunc_page(*addr);
+       map_addr = ((flags & VM_FLAGS_ANYWHERE)
+                   ? vm_map_min(parent)
+                   : vm_map_trunc_page(*addr,
+                                       VM_MAP_PAGE_MASK(parent)));
 
        kr = vm_map_enter(parent, &map_addr, map_size,
-                         (vm_map_offset_t) 0, flags,
+                         (vm_map_offset_t) 0, flags, vmk_flags, tag,
                          vm_submap_object, (vm_object_offset_t) 0, FALSE,
                          VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS) {
@@ -689,13 +1273,16 @@ kmem_suballoc(
        map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
        if (map == VM_MAP_NULL)
                panic("kmem_suballoc: vm_map_create failed");   /* "can't happen" */
+       /* inherit the parent map's page size */
+       vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));
 
        kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
        if (kr != KERN_SUCCESS) {
                /*
                 * See comment preceding vm_map_submap().
                 */
-               vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
+               vm_map_remove(parent, map_addr, map_addr + map_size,
+                             VM_MAP_REMOVE_NO_FLAGS);
                vm_map_deallocate(map); /* also removes ref to pmap */
                vm_object_deallocate(vm_submap_object);
                return (kr);
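
kmem_suballoc() likewise gained vmk_flags and a tag; the kernel's special-purpose submaps are carved out of kernel_map this way. A hedged sketch (the 16MB size and the tag are illustrative):

	vm_map_t	submap;
	vm_offset_t	submap_addr = 0;
	kern_return_t	kr;

	kr = kmem_suballoc(kernel_map, &submap_addr, 16 * 1024 * 1024,
			   FALSE,		/* not pageable */
			   VM_FLAGS_ANYWHERE,
			   VM_MAP_KERNEL_FLAGS_NONE,
			   VM_KERN_MEMORY_OSFMK,
			   &submap);
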
@@ -718,40 +1305,101 @@ kmem_init(
 {
        vm_map_offset_t map_start;
        vm_map_offset_t map_end;
+       vm_map_kernel_flags_t vmk_flags;
 
-       map_start = vm_map_trunc_page(start);
-       map_end = vm_map_round_page(end);
+       vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+       vmk_flags.vmkf_permanent = TRUE;
+       vmk_flags.vmkf_no_pmap_check = TRUE;
 
-       kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_ADDRESS,
-                                  map_end, FALSE);
+       map_start = vm_map_trunc_page(start,
+                                     VM_MAP_PAGE_MASK(kernel_map));
+       map_end = vm_map_round_page(end,
+                                   VM_MAP_PAGE_MASK(kernel_map));
 
+#if    defined(__arm__) || defined(__arm64__)
+       kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+                           VM_MAX_KERNEL_ADDRESS, FALSE);
        /*
         *      Reserve virtual memory allocated up to this time.
         */
-
-       if (start != VM_MIN_KERNEL_ADDRESS) {
+       {
+               unsigned int    region_select = 0;
+               vm_map_offset_t region_start;
+               vm_map_size_t   region_size;
                vm_map_offset_t map_addr;
+               kern_return_t kr;
+
+               while (pmap_virtual_region(region_select, &region_start, &region_size)) {
+
+                       map_addr = region_start;
+                       kr = vm_map_enter(kernel_map, &map_addr,
+                                         vm_map_round_page(region_size,
+                                                           VM_MAP_PAGE_MASK(kernel_map)),
+                                         (vm_map_offset_t) 0,
+                                         VM_FLAGS_FIXED,
+                                         vmk_flags,
+                                         VM_KERN_MEMORY_NONE,
+                                         VM_OBJECT_NULL, 
+                                         (vm_object_offset_t) 0, FALSE, VM_PROT_NONE, VM_PROT_NONE,
+                                         VM_INHERIT_DEFAULT);
+
+                       if (kr != KERN_SUCCESS) {
+                               panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
+                                      (uint64_t) start, (uint64_t) end, (uint64_t) region_start,
+                                      (uint64_t) region_size, kr);
+                       }       
+
+                       region_select++;
+               }       
+       }
+#else
+       kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+                           map_end, FALSE);
+       /*
+        *      Reserve virtual memory allocated up to this time.
+        */
+       if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
+               vm_map_offset_t map_addr;
+               kern_return_t kr;
+               vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+               vmk_flags.vmkf_no_pmap_check = TRUE;
+
+               map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
+               kr = vm_map_enter(kernel_map,
+                                 &map_addr, 
+                                 (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+                                 (vm_map_offset_t) 0,
+                                 VM_FLAGS_FIXED,
+                                 vmk_flags,
+                                 VM_KERN_MEMORY_NONE,
+                                 VM_OBJECT_NULL, 
+                                 (vm_object_offset_t) 0, FALSE,
+                                 VM_PROT_NONE, VM_PROT_NONE,
+                                 VM_INHERIT_DEFAULT);
+               
+               if (kr != KERN_SUCCESS) {
+                       panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
+                             (uint64_t) start, (uint64_t) end,
+                             (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+                             (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+                             kr);
+               }       
+       }
+#endif
 
-               map_addr = VM_MIN_KERNEL_ADDRESS;
-               (void) vm_map_enter(kernel_map,
-                                   &map_addr, 
-                                   (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
-                                   (vm_map_offset_t) 0,
-                                   VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
-                                   VM_OBJECT_NULL, 
-                                   (vm_object_offset_t) 0, FALSE,
-                                   VM_PROT_DEFAULT, VM_PROT_ALL,
-                                   VM_INHERIT_DEFAULT);
-       }
-
-        /*
-         * Account for kernel memory (text, data, bss, vm shenanigans).
-         * This may include inaccessible "holes" as determined by what
-         * the machine-dependent init code includes in max_mem.
-         */
-        vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
-                                                + vm_page_active_count
-                                                + vm_page_inactive_count));
+       /*
+        * Set the default global user wire limit which limits the amount of
+        * memory that can be locked via mlock().  We set this to the total
+        * amount of memory that is potentially usable by a user app (max_mem)
+        * minus a certain amount.  This can be overridden via a sysctl.
+        */
+       vm_global_no_user_wire_amount = MIN(max_mem*20/100,
+                                           VM_NOT_USER_WIREABLE);
+       vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount;
+       
+       /* the default per user limit is the same as the global limit */
+       vm_user_wire_limit = vm_global_user_wire_limit;
 }
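
To make the new limits concrete: on a machine with max_mem = 16GB, vm_global_no_user_wire_amount = MIN(16GB * 20/100, VM_NOT_USER_WIREABLE) = MIN(3.2GB, VM_NOT_USER_WIREABLE), and vm_global_user_wire_limit (and the per-user default) is 16GB minus that reserve, so mlock() across all processes can never pin the final slice of physical memory.
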
 
 
@@ -825,116 +1473,95 @@ copyoutmap(
        return KERN_SUCCESS;
 }
 
+/*
+ *
+ *     The following two functions are to be used when exposing kernel
+ *     addresses to userspace via any of the various debug or info
+ *     facilities that exist. These are basically the same as VM_KERNEL_ADDRPERM()
+ *     and VM_KERNEL_UNSLIDE_OR_PERM() except they use a different random seed and
+ *     are exported to KEXTs.
+ *
+ *     NOTE: USE THE MACRO VERSIONS OF THESE FUNCTIONS (in vm_param.h) FROM WITHIN THE KERNEL
+ */
 
-kern_return_t
-vm_conflict_check(
-       vm_map_t                map,
-       vm_map_offset_t off,
-       vm_map_size_t           len,
-       memory_object_t pager,
-       vm_object_offset_t      file_off)
+static void
+vm_kernel_addrhash_internal(
+       vm_offset_t addr,
+       vm_offset_t *hash_addr,
+       uint64_t salt)
 {
-       vm_map_entry_t          entry;
-       vm_object_t             obj;
-       vm_object_offset_t      obj_off;
-       vm_map_t                base_map;
-       vm_map_offset_t         base_offset;
-       vm_map_offset_t         original_offset;
-       kern_return_t           kr;
-       vm_map_size_t           local_len;
+       assert(salt != 0);
 
-       base_map = map;
-       base_offset = off;
-       original_offset = off;
-       kr = KERN_SUCCESS;
-       vm_map_lock(map);
-       while(vm_map_lookup_entry(map, off, &entry)) {
-               local_len = len;
+       if (addr == 0) {
+               *hash_addr = 0;
+               return;
+       }
 
-               if (entry->object.vm_object == VM_OBJECT_NULL) {
-                       vm_map_unlock(map);
-                       return KERN_SUCCESS;
-               }
-               if (entry->is_sub_map) {
-                       vm_map_t        old_map;
-
-                       old_map = map;
-                       vm_map_lock(entry->object.sub_map);
-                       map = entry->object.sub_map;
-                       off = entry->offset + (off - entry->vme_start);
-                       vm_map_unlock(old_map);
-                       continue;
-               }
-               obj = entry->object.vm_object;
-               obj_off = (off - entry->vme_start) + entry->offset;
-               while(obj->shadow) {
-                       obj_off += obj->shadow_offset;
-                       obj = obj->shadow;
-               }
-               if((obj->pager_created) && (obj->pager == pager)) {
-                       if(((obj->paging_offset) + obj_off) == file_off) {
-                               if(off != base_offset) {
-                                       vm_map_unlock(map);
-                                       return KERN_FAILURE;
-                               }
-                               kr = KERN_ALREADY_WAITING;
-                       } else {
-                               vm_object_offset_t      obj_off_aligned;
-                               vm_object_offset_t      file_off_aligned;
-
-                               obj_off_aligned = obj_off & ~PAGE_MASK;
-                               file_off_aligned = file_off & ~PAGE_MASK;
-
-                               if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
-                                       /*
-                                        * the target map and the file offset start in the same page
-                                        * but are not identical... 
-                                        */
-                                       vm_map_unlock(map);
-                                       return KERN_FAILURE;
-                               }
-                               if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
-                                   ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
-                                       /*
-                                        * some portion of the tail of the I/O will fall
-                                        * within the encompass of the target map
-                                        */
-                                       vm_map_unlock(map);
-                                       return KERN_FAILURE;
-                               }
-                               if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
-                                   (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
-                                       /*
-                                        * the beginning page of the file offset falls within
-                                        * the target map's encompass
-                                        */
-                                       vm_map_unlock(map);
-                                       return KERN_FAILURE;
-                               }
-                       }
-               } else if(kr != KERN_SUCCESS) {
-                       vm_map_unlock(map);
-                       return KERN_FAILURE;
-               }
+       if (VM_KERNEL_IS_SLID(addr)) {
+               *hash_addr = VM_KERNEL_UNSLIDE(addr);
+               return;
+       }
 
-               if(len <= ((entry->vme_end - entry->vme_start) -
-                                               (off - entry->vme_start))) {
-                       vm_map_unlock(map);
-                       return kr;
-               } else {
-                       len -= (entry->vme_end - entry->vme_start) -
-                                               (off - entry->vme_start);
-               }
-               base_offset = base_offset + (local_len - len);
-               file_off = file_off + (local_len - len);
-               off = base_offset;
-               if(map != base_map) {
-                       vm_map_unlock(map);
-                       vm_map_lock(base_map);
-                       map = base_map;
-               }
+       vm_offset_t sha_digest[SHA256_DIGEST_LENGTH/sizeof(vm_offset_t)];
+       SHA256_CTX sha_ctx;
+
+       SHA256_Init(&sha_ctx);
+       SHA256_Update(&sha_ctx, &salt, sizeof(salt));
+       SHA256_Update(&sha_ctx, &addr, sizeof(addr));
+       SHA256_Final(sha_digest, &sha_ctx);
+
+       *hash_addr = sha_digest[0];
+}
+
+void
+vm_kernel_addrhash_external(
+       vm_offset_t addr,
+       vm_offset_t *hash_addr)
+{
+       return vm_kernel_addrhash_internal(addr, hash_addr, vm_kernel_addrhash_salt_ext);
+}
+
+vm_offset_t
+vm_kernel_addrhash(vm_offset_t addr)
+{
+       vm_offset_t hash_addr;
+       vm_kernel_addrhash_internal(addr, &hash_addr, vm_kernel_addrhash_salt);
+       return hash_addr;
+}
+
+void
+vm_kernel_addrhide(
+       vm_offset_t addr,
+       vm_offset_t *hide_addr)
+{
+       *hide_addr = VM_KERNEL_ADDRHIDE(addr);
+}
+
+/*
+ *     vm_kernel_addrperm_external:
+ *     vm_kernel_unslide_or_perm_external:
+ *
+ *     Use these functions when exposing an address to userspace that could come from
+ *     either kernel text/data *or* the heap.
+ */
+void
+vm_kernel_addrperm_external(
+       vm_offset_t addr,
+       vm_offset_t *perm_addr)
+{
+       if (VM_KERNEL_IS_SLID(addr)) {
+               *perm_addr = VM_KERNEL_UNSLIDE(addr);
+       } else if (VM_KERNEL_ADDRESS(addr)) {
+               *perm_addr = addr + vm_kernel_addrperm_ext;
+       } else {
+               *perm_addr = addr;
        }
+}
 
-       vm_map_unlock(map);
-       return kr;
+void
+vm_kernel_unslide_or_perm_external(
+       vm_offset_t addr,
+       vm_offset_t *up_addr)
+{
+       vm_kernel_addrperm_external(addr, up_addr);
 }
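
A sketch of how a kext might sanitize a kernel pointer with the exported helpers above before handing it to userspace (the object pointer is hypothetical):

	extern void	*obj;		/* hypothetical heap pointer */
	vm_offset_t	safe, hashed;

	/* perturbed-but-stable value, safe to expose */
	vm_kernel_unslide_or_perm_external((vm_offset_t)obj, &safe);

	/* salted SHA-256 hash of the address */
	vm_kernel_addrhash_external((vm_offset_t)obj, &hashed);

	printf("obj %p (hash 0x%lx)\n", (void *)safe, (unsigned long)hashed);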