apple/xnu.git blobdiff: osfmk/vm/vm_kern.c (newer side from xnu-4903.241.1)
index d015ebd2c7cc79e1728f7fc89411875777cd5feb..8e53cbd13802039bf99dba3315ee828938eebbc6 100644
--- a/osfmk/vm/vm_kern.c
+++ b/osfmk/vm/vm_kern.c
@@ -71,14 +71,18 @@
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
+#include <vm/vm_compressor.h>
 #include <vm/vm_pageout.h>
 #include <kern/misc_protos.h>
 #include <vm/cpm.h>
 #include <string.h>
 
 #include <libkern/OSDebug.h>
+#include <libkern/crypto/sha2.h>
 #include <sys/kdebug.h>
 
+#include <san/kasan.h>
+
 /*
  *     Variables exported by this module.
  */
@@ -93,9 +97,9 @@ extern boolean_t vm_kernel_ready;
  * Forward declarations for internal functions.
  */
 extern kern_return_t kmem_alloc_pages(
-       register vm_object_t            object,
-       register vm_object_offset_t     offset,
-       register vm_object_size_t       size);
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size);
 
 kern_return_t
 kmem_alloc_contig(
@@ -117,6 +121,8 @@ kmem_alloc_contig(
        vm_page_t               m, pages;
        kern_return_t           kr;
 
+       assert(VM_KERN_MEMORY_NONE != tag);
+
        if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) 
                return KERN_INVALID_ARGUMENT;
 
@@ -142,7 +148,8 @@ kmem_alloc_contig(
                object = vm_object_allocate(map_size);
        }
 
-       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
+       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0,
+                              VM_MAP_KERNEL_FLAGS_NONE, tag, &entry);
        if (KERN_SUCCESS != kr) {
                vm_object_deallocate(object);
                return kr;
@@ -155,7 +162,6 @@ kmem_alloc_contig(
        }
        VME_OBJECT_SET(entry, object);
        VME_OFFSET_SET(entry, offset);
-       VME_ALIAS_SET(entry, tag);
 
        /* Take an extra object ref in case the map entry gets deleted */
        vm_object_reference(object);
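
Note: the allocation tag and a vm_map_kernel_flags_t now travel into vm_map_find_space() itself, which records the alias on the new entry; that is why the explicit VME_ALIAS_SET(entry, tag) disappears above. A minimal sketch of a call under the new signature (the caller's map/map_addr/map_size/map_mask/tag are assumed in scope):

    vm_map_entry_t  entry;
    kern_return_t   kr;

    kr = vm_map_find_space(map, &map_addr, map_size, map_mask,
                           0 /* vm_alloc_flags */,
                           VM_MAP_KERNEL_FLAGS_NONE, tag, &entry);
    if (kr != KERN_SUCCESS)
            return kr;
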
@@ -169,7 +175,7 @@ kmem_alloc_contig(
                                                VM_MAP_PAGE_MASK(map)),
                              vm_map_round_page(map_addr + map_size,
                                                VM_MAP_PAGE_MASK(map)),
-                             0);
+                             VM_MAP_REMOVE_NO_FLAGS);
                vm_object_deallocate(object);
                *addrp = 0;
                return kr;
@@ -180,17 +186,17 @@ kmem_alloc_contig(
                m = pages;
                pages = NEXT_PAGE(m);
                *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
-               m->busy = FALSE;
+               m->vmp_busy = FALSE;
                vm_page_insert(m, object, offset + i);
        }
        vm_object_unlock(object);
 
-       kr = vm_map_wire(map,
+       kr = vm_map_wire_kernel(map,
                         vm_map_trunc_page(map_addr,
                                           VM_MAP_PAGE_MASK(map)),
                         vm_map_round_page(map_addr + map_size,
                                           VM_MAP_PAGE_MASK(map)),
-                        VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(tag),
+                        VM_PROT_DEFAULT, tag,
                         FALSE);
 
        if (kr != KERN_SUCCESS) {
@@ -204,17 +210,19 @@ kmem_alloc_contig(
                                                VM_MAP_PAGE_MASK(map)), 
                              vm_map_round_page(map_addr + map_size,
                                                VM_MAP_PAGE_MASK(map)),
-                             0);
+                             VM_MAP_REMOVE_NO_FLAGS);
                vm_object_deallocate(object);
                return kr;
        }
        vm_object_deallocate(object);
 
-       if (object == kernel_object)
+       if (object == kernel_object) {
                vm_map_simplify(map, map_addr);
-
+               vm_tag_update_size(tag, map_size);
+       }
        *addrp = (vm_offset_t) map_addr;
        assert((vm_map_offset_t) *addrp == map_addr);
+
        return KERN_SUCCESS;
 }
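
Note: vm_tag_update_size(tag, map_size) is the accounting half of the tag plumbing: bytes wired against kernel_object are credited to the allocation tag, so the per-tag totals surfaced through mach_memory_info() stay accurate; the totals are debited again when the range is unwired.
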
 
@@ -237,10 +245,10 @@ kmem_alloc_contig(
 
 kern_return_t
 kernel_memory_allocate(
-       register vm_map_t       map,
-       register vm_offset_t    *addrp,
-       register vm_size_t      size,
-       register vm_offset_t    mask,
+       vm_map_t        map,
+       vm_offset_t     *addrp,
+       vm_size_t       size,
+       vm_offset_t     mask,
        int                     flags,
        vm_tag_t                tag)
 {
@@ -257,8 +265,10 @@ kernel_memory_allocate(
        vm_page_t               wired_page_list = NULL;
        int                     guard_page_count = 0;
        int                     wired_page_count = 0;
+       int                     page_grab_count = 0;
        int                     i;
        int                     vm_alloc_flags;
+       vm_map_kernel_flags_t   vmk_flags;
        vm_prot_t               kma_prot;
 
        if (! vm_kernel_ready) {
@@ -269,7 +279,8 @@ kernel_memory_allocate(
                                     VM_MAP_PAGE_MASK(map));
        map_mask = (vm_map_offset_t) mask;
 
-       vm_alloc_flags = VM_MAKE_TAG(tag);
+       vm_alloc_flags = 0; //VM_MAKE_TAG(tag);
+       vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
 
        /* Check for zero allocation size (either directly or via overflow) */
        if (map_size == 0) {
@@ -281,9 +292,11 @@ kernel_memory_allocate(
         * limit the size of a single extent of wired memory
         * to try and limit the damage to the system if
         * too many pages get wired down
-        * limit raised to 2GB with 128GB max physical limit
+        * limit raised to 2GB with 128GB max physical limit,
+        * but scaled by installed memory above this
         */
-        if ( !(flags & KMA_VAONLY) && map_size > (1ULL << 31)) {
+        if (!(flags & (KMA_VAONLY | KMA_PAGEABLE)) &&
+           map_size > MAX(1ULL<<31, sane_size/64)) {
                 return KERN_RESOURCE_SHORTAGE;
         }
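
Note: the single-extent wiring cap is now MAX(2 GB, sane_size/64), where sane_size is xnu's installed-physical-memory figure. Worked through: at 128 GB of RAM, sane_size/64 = 2 GB, so the historical cap is unchanged; at 512 GB it scales up to 8 GB. KMA_PAGEABLE joins KMA_VAONLY in bypassing the check, since neither variant wires pages up front.
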
 
@@ -306,7 +319,7 @@ kernel_memory_allocate(
        fill_size = map_size;
 
        if (flags & KMA_GUARD_FIRST) {
-               vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
+               vmk_flags.vmkf_guard_before = TRUE;
                fill_start += PAGE_SIZE_64;
                fill_size -= PAGE_SIZE_64;
                if (map_size < fill_start + fill_size) {
@@ -317,7 +330,7 @@ kernel_memory_allocate(
                guard_page_count++;
        }
        if (flags & KMA_GUARD_LAST) {
-               vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
+               vmk_flags.vmkf_guard_after = TRUE;
                fill_size -= PAGE_SIZE_64;
                if (map_size <= fill_start + fill_size) {
                        /* no space for a guard page */
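
Note: fill_start/fill_size carve the wired payload out of the middle of the reservation, so with both guard flags the layout is:

    map_addr                                        map_addr + map_size
    |  guard  |  fill_start ... fill_start + fill_size  |  guard  |

with both guard pages fictitious (never entered into the pmap) and the middle span wired and mapped read/write. Kernel stack allocation is the canonical consumer, passing something like KMA_KSTACK | KMA_GUARD_FIRST | KMA_GUARD_LAST so a stack overrun faults on a guard page instead of silently corrupting a neighboring allocation.
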
@@ -329,6 +342,10 @@ kernel_memory_allocate(
        wired_page_count = (int) (fill_size / PAGE_SIZE_64);
        assert(wired_page_count * PAGE_SIZE_64 == fill_size);
 
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START, size, 0, 0, 0);
+#endif
+
        for (i = 0; i < guard_page_count; i++) {
                for (;;) {
                        mem = vm_page_grab_guard();
@@ -341,11 +358,11 @@ kernel_memory_allocate(
                        }
                        vm_page_more_fictitious();
                }
-               mem->pageq.next = (queue_entry_t)guard_page_list;
+               mem->vmp_snext = guard_page_list;
                guard_page_list = mem;
        }
 
-       if (! (flags & KMA_VAONLY)) {
+       if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
        for (i = 0; i < wired_page_count; i++) {
                uint64_t        unavailable;
                
@@ -374,7 +391,9 @@ kernel_memory_allocate(
                        }
                        VM_PAGE_WAIT();
                }
-               mem->pageq.next = (queue_entry_t)wired_page_list;
+               page_grab_count++;
+               if (KMA_ZERO & flags) vm_page_zero_fill(mem);
+               mem->vmp_snext = wired_page_list;
                wired_page_list = mem;
        }
        }
@@ -393,9 +412,12 @@ kernel_memory_allocate(
                object = vm_object_allocate(map_size);
        }
 
+       if (flags & KMA_ATOMIC)
+               vmk_flags.vmkf_atomic_entry = TRUE;
+
        kr = vm_map_find_space(map, &map_addr,
                               fill_size, map_mask,
-                              vm_alloc_flags, &entry);
+                              vm_alloc_flags, vmk_flags, tag, &entry);
        if (KERN_SUCCESS != kr) {
                vm_object_deallocate(object);
                goto out;
@@ -409,7 +431,7 @@ kernel_memory_allocate(
        VME_OBJECT_SET(entry, object);
        VME_OFFSET_SET(entry, offset);
        
-       if (object != compressor_object)
+       if (!(flags & (KMA_COMPRESSOR | KMA_PAGEABLE)))
                entry->wired_count++;
 
        if (flags & KMA_PERMANENT)
@@ -428,18 +450,25 @@ kernel_memory_allocate(
                        panic("kernel_memory_allocate: guard_page_list == NULL");
 
                mem = guard_page_list;
-               guard_page_list = (vm_page_t)mem->pageq.next;
-               mem->pageq.next = NULL;
+               guard_page_list = mem->vmp_snext;
+               mem->vmp_snext = NULL;
 
                vm_page_insert(mem, object, offset + pg_offset);
 
-               mem->busy = FALSE;
+               mem->vmp_busy = FALSE;
                pg_offset += PAGE_SIZE_64;
        }
 
        kma_prot = VM_PROT_READ | VM_PROT_WRITE;
 
-       if (flags & KMA_VAONLY) {
+#if KASAN
+       if (!(flags & KMA_VAONLY)) {
+               /* for VAONLY mappings we notify in populate only */
+               kasan_notify_address(map_addr, size);
+       }
+#endif
+
+       if (flags & (KMA_VAONLY | KMA_PAGEABLE)) {
                pg_offset = fill_start + fill_size;
        } else {
        for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
@@ -447,15 +476,24 @@ kernel_memory_allocate(
                        panic("kernel_memory_allocate: wired_page_list == NULL");
 
                mem = wired_page_list;
-               wired_page_list = (vm_page_t)mem->pageq.next;
-               mem->pageq.next = NULL;
-               mem->wire_count++;
+               wired_page_list = mem->vmp_snext;
+               mem->vmp_snext = NULL;
+
+               assert(mem->vmp_wire_count == 0);
+               assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
+
+               mem->vmp_q_state = VM_PAGE_IS_WIRED;
+               mem->vmp_wire_count++;
+               if (__improbable(mem->vmp_wire_count == 0)) {
+                       panic("kernel_memory_allocate(%p): wire_count overflow",
+                             mem);
+               }
 
                vm_page_insert_wired(mem, object, offset + pg_offset, tag);
 
-               mem->busy = FALSE;
-               mem->pmapped = TRUE;
-               mem->wpmapped = TRUE;
+               mem->vmp_busy = FALSE;
+               mem->vmp_pmapped = TRUE;
+               mem->vmp_wpmapped = TRUE;
 
                PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
                                   kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
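
Note: vmp_wire_count is a narrow bit-field in the new vm_page layout, so wraparound is detected by testing for zero after the increment. The pattern in isolation (unsigned short standing in for the real field width):

    unsigned short wire_count = 0xffff;  /* saturated */
    wire_count++;
    if (wire_count == 0) {
            /* wrapped: this is what the panic above catches */
    }

The vmp_q_state assertions additionally pin down that a freshly grabbed page sits on no paging queue before it is marked VM_PAGE_IS_WIRED.
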
@@ -465,33 +503,38 @@ kernel_memory_allocate(
                        vm_object_unlock(object);
 
                        PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem, 
-                                  kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
+                                  kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+                                  pe_result);
 
                        vm_object_lock(object);
                }
+
+               assert(pe_result == KERN_SUCCESS);
+
                if (flags & KMA_NOENCRYPT) {
                        bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
 
-                       pmap_set_noencrypt(mem->phys_page);
+                       pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
                }
        }
+       if (kernel_object == object) vm_tag_update_size(tag, fill_size);
        }
        if ((fill_start + fill_size) < map_size) {
                if (guard_page_list == NULL)
                        panic("kernel_memory_allocate: guard_page_list == NULL");
 
                mem = guard_page_list;
-               guard_page_list = (vm_page_t)mem->pageq.next;
-               mem->pageq.next = NULL;
+               guard_page_list = mem->vmp_snext;
+               mem->vmp_snext = NULL;
 
                vm_page_insert(mem, object, offset + pg_offset);
 
-               mem->busy = FALSE;
+               mem->vmp_busy = FALSE;
        }
        if (guard_page_list || wired_page_list)
                panic("kernel_memory_allocate: non empty list\n");
 
-       if (! (flags & KMA_VAONLY)) {
+       if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
        vm_page_lockspin_queues();
        vm_page_wire_count += wired_page_count;
        vm_page_unlock_queues();
@@ -507,6 +550,10 @@ kernel_memory_allocate(
        else
                vm_object_deallocate(object);
 
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
        /*
         *      Return the memory, not zeroed.
         */
@@ -520,6 +567,10 @@ out:
        if (wired_page_list)
                vm_page_free_list(wired_page_list, FALSE);
 
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
        return kr;
 }
 
@@ -537,8 +588,13 @@ kernel_memory_populate(
        vm_page_t               mem;
        vm_page_t               page_list = NULL;
        int                     page_count = 0;
+       int                     page_grab_count = 0;
        int                     i;
 
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START, size, 0, 0, 0);
+#endif
+
        page_count = (int) (size / PAGE_SIZE_64);
 
        assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
@@ -556,13 +612,15 @@ kernel_memory_populate(
                                
                                VM_PAGE_WAIT();
                        }
-                       mem->pageq.next = (queue_entry_t) page_list;
+                       page_grab_count++;
+                       if (KMA_ZERO & flags) vm_page_zero_fill(mem);
+                       mem->vmp_snext = page_list;
                        page_list = mem;
 
                        pg_offset -= PAGE_SIZE_64;
 
                        kr = pmap_enter_options(kernel_pmap,
-                                                 addr + pg_offset, mem->phys_page,
+                                                 addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem),
                                                  VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
                                                  PMAP_OPTIONS_INTERNAL, NULL);
                        assert(kr == KERN_SUCCESS);
@@ -579,19 +637,30 @@ kernel_memory_populate(
                     pg_offset += PAGE_SIZE_64) {
 
                        mem = page_list;
-                       page_list = (vm_page_t) mem->pageq.next;
-                       mem->pageq.next = NULL;
+                       page_list = mem->vmp_snext;
+                       mem->vmp_snext = NULL;
 
                        vm_page_insert(mem, object, offset + pg_offset);
-                       assert(mem->busy);
+                       assert(mem->vmp_busy);
 
-                       mem->busy = FALSE;
-                       mem->pmapped = TRUE;
-                       mem->wpmapped = TRUE;
-                       mem->compressor = TRUE;
+                       mem->vmp_busy = FALSE;
+                       mem->vmp_pmapped = TRUE;
+                       mem->vmp_wpmapped = TRUE;
+                       mem->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
                }
                vm_object_unlock(object);
 
+#if KASAN
+               if (map == compressor_map) {
+                       kasan_notify_address_nopoison(addr, size);
+               } else {
+                       kasan_notify_address(addr, size);
+               }
+#endif
+
+#if DEBUG || DEVELOPMENT
+               VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
                return KERN_SUCCESS;
        }
 
@@ -616,7 +685,9 @@ kernel_memory_populate(
                        }
                        VM_PAGE_WAIT();
                }
-               mem->pageq.next = (queue_entry_t) page_list;
+               page_grab_count++;
+               if (KMA_ZERO & flags) vm_page_zero_fill(mem);
+               mem->vmp_snext = page_list;
                page_list = mem;
        }
        if (flags & KMA_KOBJECT) {
@@ -646,16 +717,21 @@ kernel_memory_populate(
                        panic("kernel_memory_populate: page_list == NULL");
 
                mem = page_list;
-               page_list = (vm_page_t) mem->pageq.next;
-               mem->pageq.next = NULL;
-
-               mem->wire_count++;
+               page_list = mem->vmp_snext;
+               mem->vmp_snext = NULL;
+
+               assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
+               mem->vmp_q_state = VM_PAGE_IS_WIRED;
+               mem->vmp_wire_count++;
+               if (__improbable(mem->vmp_wire_count == 0)) {
+                       panic("kernel_memory_populate(%p): wire_count overflow", mem);
+               }
 
                vm_page_insert_wired(mem, object, offset + pg_offset, tag);
 
-               mem->busy = FALSE;
-               mem->pmapped = TRUE;
-               mem->wpmapped = TRUE;
+               mem->vmp_busy = FALSE;
+               mem->vmp_pmapped = TRUE;
+               mem->vmp_wpmapped = TRUE;
 
                PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
                                   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
@@ -668,27 +744,48 @@ kernel_memory_populate(
 
                        PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
                                   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
-                                  ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
+                                  ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+                                  pe_result);
 
                        vm_object_lock(object);
                }
+
+               assert(pe_result == KERN_SUCCESS);
+
                if (flags & KMA_NOENCRYPT) {
                        bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
-                       pmap_set_noencrypt(mem->phys_page);
+                       pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
                }
        }
-       vm_page_lock_queues();
+       vm_page_lockspin_queues();
        vm_page_wire_count += page_count;
        vm_page_unlock_queues();
 
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
+       if (kernel_object == object) vm_tag_update_size(tag, size);
+
        vm_object_unlock(object);
 
+#if KASAN
+       if (map == compressor_map) {
+               kasan_notify_address_nopoison(addr, size);
+       } else {
+               kasan_notify_address(addr, size);
+       }
+#endif
        return KERN_SUCCESS;
 
 out:
        if (page_list)
                vm_page_free_list(page_list, FALSE);
 
+#if DEBUG || DEVELOPMENT
+       VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
        return kr;
 }
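
Note: kernel_memory_populate()/kernel_memory_depopulate() pair naturally with a KMA_VAONLY reservation: the virtual range is carved out once, and physical pages are wired in and out later. A hypothetical sequence (tag and sizes are placeholders, not taken from this file):

    vm_offset_t   base;
    kern_return_t kr;

    /* reserve 16 pages of kernel_object VA; nothing wired yet */
    kr = kernel_memory_allocate(kernel_map, &base, 16 * PAGE_SIZE, 0,
                                KMA_KOBJECT | KMA_VAONLY, VM_KERN_MEMORY_OSFMK);

    /* later: back the first page with zero-filled wired memory */
    kr = kernel_memory_populate(kernel_map, base, PAGE_SIZE,
                                KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_OSFMK);

    /* and release it again without giving up the VA */
    kernel_memory_depopulate(kernel_map, base, PAGE_SIZE, KMA_KOBJECT);
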
 
@@ -715,7 +812,6 @@ kernel_memory_depopulate(
        } else if (flags & KMA_KOBJECT) {
                offset = addr;
                object = kernel_object;
-
                vm_object_lock(object);
        } else {
                offset = 0;
@@ -740,18 +836,22 @@ kernel_memory_depopulate(
                mem = vm_page_lookup(object, offset + pg_offset);
 
                assert(mem);
+               
+               if (mem->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR)
+                       pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
 
-               pmap_disconnect(mem->phys_page);
-
-               mem->busy = TRUE;
+               mem->vmp_busy = TRUE;
 
-               assert(mem->tabled);
+               assert(mem->vmp_tabled);
                vm_page_remove(mem, TRUE);
-               assert(mem->busy);
+               assert(mem->vmp_busy);
+
+               assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
+               assert((mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+                      (mem->vmp_q_state == VM_PAGE_NOT_ON_Q));
 
-               assert(mem->pageq.next == NULL &&
-                      mem->pageq.prev == NULL);
-               mem->pageq.next = (queue_entry_t)local_freeq;
+               mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
+               mem->vmp_snext = local_freeq;
                local_freeq = mem;
        }
        vm_object_unlock(object);
@@ -776,14 +876,26 @@ kmem_alloc_external(
     return (kmem_alloc(map, addrp, size, vm_tag_bt()));
 }
 
+
 kern_return_t
 kmem_alloc(
        vm_map_t        map,
        vm_offset_t     *addrp,
        vm_size_t       size,
-       vm_tag_t        tag)
+       vm_tag_t        tag)
+{
+       return kmem_alloc_flags(map, addrp, size, tag, 0);
+}
+
+kern_return_t
+kmem_alloc_flags(
+       vm_map_t        map,
+       vm_offset_t     *addrp,
+       vm_size_t       size,
+       vm_tag_t        tag,
+       int             flags)
 {
-       kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0, tag);
+       kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, flags, tag);
        TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
        return kr;
 }
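
Note: kmem_alloc() is now a thin wrapper over kmem_alloc_flags(), which exposes the KMA_* flag word to kmem callers. A hypothetical call site requesting zero-filled memory under the pre-existing VM_KERN_MEMORY_KALLOC tag:

    vm_offset_t   addr;
    kern_return_t kr;

    kr = kmem_alloc_flags(kernel_map, &addr, PAGE_SIZE,
                          VM_KERN_MEMORY_KALLOC, KMA_ZERO);
    if (kr == KERN_SUCCESS)
            kmem_free(kernel_map, addr, PAGE_SIZE);
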
@@ -826,7 +938,11 @@ kmem_realloc(
        oldmapsize = oldmapmax - oldmapmin;
        newmapsize = vm_map_round_page(newsize,
                                       VM_MAP_PAGE_MASK(map));
-
+       if (newmapsize < newsize) {
+               /* overflow */
+               *newaddrp = 0;
+               return KERN_INVALID_ARGUMENT;
+       }
 
        /*
         *      Find the VM object backing the old region.
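
Note: vm_map_round_page() is essentially (size + page_mask) & ~page_mask in the map's page geometry, and the addition can wrap near the top of the offset space, yielding a rounded size smaller than the request; hence the new newmapsize < newsize guard (the same check is added to kmem_alloc_pageable and kmem_suballoc further down). A concrete 4 KB-page instance:

    uint64_t size    = 0xfffffffffffff001ULL;
    uint64_t rounded = (size + 0xfff) & ~0xfffULL;  /* wraps to 0, so rounded < size */
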
@@ -864,7 +980,10 @@ kmem_realloc(
         */
 
        kr = vm_map_find_space(map, &newmapaddr, newmapsize,
-                              (vm_map_offset_t) 0, 0, &newentry);
+                              (vm_map_offset_t) 0, 0,
+                              VM_MAP_KERNEL_FLAGS_NONE,
+                              tag,
+                              &newentry);
        if (kr != KERN_SUCCESS) {
                vm_object_lock(object);
                for(offset = oldmapsize; 
@@ -880,7 +999,6 @@ kmem_realloc(
        }
        VME_OBJECT_SET(newentry, object);
        VME_OFFSET_SET(newentry, 0);
-       VME_ALIAS_SET(newentry, tag);
        assert(newentry->wired_count == 0);
 
        
@@ -889,10 +1007,10 @@ kmem_realloc(
        vm_object_reference(object);
        vm_map_unlock(map);
 
-       kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize,
-                        VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(tag), FALSE);
+       kr = vm_map_wire_kernel(map, newmapaddr, newmapaddr + newmapsize,
+                        VM_PROT_DEFAULT, tag, FALSE);
        if (KERN_SUCCESS != kr) {
-               vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
+               vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, VM_MAP_REMOVE_NO_FLAGS);
                vm_object_lock(object);
                for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
                        if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
@@ -906,6 +1024,8 @@ kmem_realloc(
        }
        vm_object_deallocate(object);
 
+       if (kernel_object == object) vm_tag_update_size(tag, newmapsize);
+
        *newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
        return KERN_SUCCESS;
 }
@@ -992,16 +1112,26 @@ kmem_alloc_pageable(
 #endif
        map_size = vm_map_round_page(size,
                                     VM_MAP_PAGE_MASK(map));
+       if (map_size < size) {
+               /* overflow */
+               *addrp = 0;
+               return KERN_INVALID_ARGUMENT;
+       }
 
        kr = vm_map_enter(map, &map_addr, map_size,
                          (vm_map_offset_t) 0, 
-                         VM_FLAGS_ANYWHERE | VM_MAKE_TAG(tag),
+                         VM_FLAGS_ANYWHERE,
+                         VM_MAP_KERNEL_FLAGS_NONE,
+                         tag,
                          VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
                          VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
 
        if (kr != KERN_SUCCESS)
                return kr;
 
+#if KASAN
+       kasan_notify_address(map_addr, map_size);
+#endif
        *addrp = CAST_DOWN(vm_offset_t, map_addr);
        return KERN_SUCCESS;
 }
@@ -1049,16 +1179,16 @@ kmem_free(
 
 kern_return_t
 kmem_alloc_pages(
-       register vm_object_t            object,
-       register vm_object_offset_t     offset,
-       register vm_object_size_t       size)
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size)
 {
        vm_object_size_t                alloc_size;
 
        alloc_size = vm_object_round_page(size);
         vm_object_lock(object);
        while (alloc_size) {
-           register vm_page_t  mem;
+           vm_page_t   mem;
 
 
            /*
@@ -1070,7 +1200,7 @@ kmem_alloc_pages(
                VM_PAGE_WAIT();
                vm_object_lock(object);
            }
-           mem->busy = FALSE;
+           mem->vmp_busy = FALSE;
 
            alloc_size -= PAGE_SIZE;
            offset += PAGE_SIZE;
@@ -1101,6 +1231,8 @@ kmem_suballoc(
        vm_size_t       size,
        boolean_t       pageable,
        int             flags,
+       vm_map_kernel_flags_t vmk_flags,
+       vm_tag_t    tag,
        vm_map_t        *new_map)
 {
        vm_map_t        map;
@@ -1110,6 +1242,11 @@ kmem_suballoc(
 
        map_size = vm_map_round_page(size,
                                     VM_MAP_PAGE_MASK(parent));
+       if (map_size < size) {
+               /* overflow */
+               *addr = 0;
+               return KERN_INVALID_ARGUMENT;
+       }
 
        /*
         *      Need reference on submap object because it is internal
@@ -1124,7 +1261,7 @@ kmem_suballoc(
                                        VM_MAP_PAGE_MASK(parent)));
 
        kr = vm_map_enter(parent, &map_addr, map_size,
-                         (vm_map_offset_t) 0, flags,
+                         (vm_map_offset_t) 0, flags, vmk_flags, tag,
                          vm_submap_object, (vm_object_offset_t) 0, FALSE,
                          VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS) {
@@ -1144,7 +1281,8 @@ kmem_suballoc(
                /*
                 * See comment preceding vm_map_submap().
                 */
-               vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
+               vm_map_remove(parent, map_addr, map_addr + map_size,
+                             VM_MAP_REMOVE_NO_FLAGS);
                vm_map_deallocate(map); /* also removes ref to pmap */
                vm_object_deallocate(vm_submap_object);
                return (kr);
@@ -1167,12 +1305,54 @@ kmem_init(
 {
        vm_map_offset_t map_start;
        vm_map_offset_t map_end;
+       vm_map_kernel_flags_t vmk_flags;
+
+       vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+       vmk_flags.vmkf_permanent = TRUE;
+       vmk_flags.vmkf_no_pmap_check = TRUE;
 
        map_start = vm_map_trunc_page(start,
                                      VM_MAP_PAGE_MASK(kernel_map));
        map_end = vm_map_round_page(end,
                                    VM_MAP_PAGE_MASK(kernel_map));
 
+#if    defined(__arm__) || defined(__arm64__)
+       kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+                           VM_MAX_KERNEL_ADDRESS, FALSE);
+       /*
+        *      Reserve virtual memory allocated up to this time.
+        */
+       {
+               unsigned int    region_select = 0;
+               vm_map_offset_t region_start;
+               vm_map_size_t   region_size;
+               vm_map_offset_t map_addr;
+               kern_return_t kr;
+
+               while (pmap_virtual_region(region_select, &region_start, &region_size)) {
+
+                       map_addr = region_start;
+                       kr = vm_map_enter(kernel_map, &map_addr,
+                                         vm_map_round_page(region_size,
+                                                           VM_MAP_PAGE_MASK(kernel_map)),
+                                         (vm_map_offset_t) 0,
+                                         VM_FLAGS_FIXED,
+                                         vmk_flags,
+                                         VM_KERN_MEMORY_NONE,
+                                         VM_OBJECT_NULL, 
+                                         (vm_object_offset_t) 0, FALSE, VM_PROT_NONE, VM_PROT_NONE,
+                                         VM_INHERIT_DEFAULT);
+
+                       if (kr != KERN_SUCCESS) {
+                               panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
+                                      (uint64_t) start, (uint64_t) end, (uint64_t) region_start,
+                                      (uint64_t) region_size, kr);
+                       }       
+
+                       region_select++;
+               }       
+       }
+#else
        kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
                            map_end, FALSE);
        /*
@@ -1182,16 +1362,21 @@ kmem_init(
                vm_map_offset_t map_addr;
                kern_return_t kr;
  
+               vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+               vmk_flags.vmkf_no_pmap_check = TRUE;
+
                map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
                kr = vm_map_enter(kernel_map,
-                       &map_addr, 
-                       (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
-                       (vm_map_offset_t) 0,
-                       VM_FLAGS_FIXED | VM_FLAGS_NO_PMAP_CHECK,
-                       VM_OBJECT_NULL, 
-                       (vm_object_offset_t) 0, FALSE,
-                       VM_PROT_NONE, VM_PROT_NONE,
-                       VM_INHERIT_DEFAULT);
+                                 &map_addr, 
+                                 (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+                                 (vm_map_offset_t) 0,
+                                 VM_FLAGS_FIXED,
+                                 vmk_flags,
+                                 VM_KERN_MEMORY_NONE,
+                                 VM_OBJECT_NULL, 
+                                 (vm_object_offset_t) 0, FALSE,
+                                 VM_PROT_NONE, VM_PROT_NONE,
+                                 VM_INHERIT_DEFAULT);
                
                if (kr != KERN_SUCCESS) {
                        panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
@@ -1201,6 +1386,7 @@ kmem_init(
                              kr);
                }       
        }
+#endif
 
        /*
         * Set the default global user wire limit which limits the amount of
@@ -1287,120 +1473,6 @@ copyoutmap(
        return KERN_SUCCESS;
 }
 
-
-kern_return_t
-vm_conflict_check(
-       vm_map_t                map,
-       vm_map_offset_t off,
-       vm_map_size_t           len,
-       memory_object_t pager,
-       vm_object_offset_t      file_off)
-{
-       vm_map_entry_t          entry;
-       vm_object_t             obj;
-       vm_object_offset_t      obj_off;
-       vm_map_t                base_map;
-       vm_map_offset_t         base_offset;
-       vm_map_offset_t         original_offset;
-       kern_return_t           kr;
-       vm_map_size_t           local_len;
-
-       base_map = map;
-       base_offset = off;
-       original_offset = off;
-       kr = KERN_SUCCESS;
-       vm_map_lock(map);
-       while(vm_map_lookup_entry(map, off, &entry)) {
-               local_len = len;
-
-               if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
-                       vm_map_unlock(map);
-                       return KERN_SUCCESS;
-               }
-               if (entry->is_sub_map) {
-                       vm_map_t        old_map;
-
-                       old_map = map;
-                       vm_map_lock(VME_SUBMAP(entry));
-                       map = VME_SUBMAP(entry);
-                       off = VME_OFFSET(entry) + (off - entry->vme_start);
-                       vm_map_unlock(old_map);
-                       continue;
-               }
-               obj = VME_OBJECT(entry);
-               obj_off = (off - entry->vme_start) + VME_OFFSET(entry);
-               while(obj->shadow) {
-                       obj_off += obj->vo_shadow_offset;
-                       obj = obj->shadow;
-               }
-               if((obj->pager_created) && (obj->pager == pager)) {
-                       if(((obj->paging_offset) + obj_off) == file_off) {
-                               if(off != base_offset) {
-                                       vm_map_unlock(map);
-                                       return KERN_FAILURE;
-                               }
-                               kr = KERN_ALREADY_WAITING;
-                       } else {
-                               vm_object_offset_t      obj_off_aligned;
-                               vm_object_offset_t      file_off_aligned;
-
-                               obj_off_aligned = obj_off & ~PAGE_MASK;
-                               file_off_aligned = file_off & ~PAGE_MASK;
-
-                               if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
-                                       /*
-                                        * the target map and the file offset start in the same page
-                                        * but are not identical... 
-                                        */
-                                       vm_map_unlock(map);
-                                       return KERN_FAILURE;
-                               }
-                               if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
-                                   ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
-                                       /*
-                                        * some portion of the tail of the I/O will fall
-                                        * within the encompass of the target map
-                                        */
-                                       vm_map_unlock(map);
-                                       return KERN_FAILURE;
-                               }
-                               if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
-                                   (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
-                                       /*
-                                        * the beginning page of the file offset falls within
-                                        * the target map's encompass
-                                        */
-                                       vm_map_unlock(map);
-                                       return KERN_FAILURE;
-                               }
-                       }
-               } else if(kr != KERN_SUCCESS) {
-                       vm_map_unlock(map);
-                       return KERN_FAILURE;
-               }
-
-               if(len <= ((entry->vme_end - entry->vme_start) -
-                                               (off - entry->vme_start))) {
-                       vm_map_unlock(map);
-                       return kr;
-               } else {
-                       len -= (entry->vme_end - entry->vme_start) -
-                                               (off - entry->vme_start);
-               }
-               base_offset = base_offset + (local_len - len);
-               file_off = file_off + (local_len - len);
-               off = base_offset;
-               if(map != base_map) {
-                       vm_map_unlock(map);
-                       vm_map_lock(base_map);
-                       map = base_map;
-               }
-       }
-
-       vm_map_unlock(map);
-       return kr;
-}
-
 /*
  *
  *     The following two functions are to be used when exposing kernel
@@ -1412,45 +1484,84 @@ vm_conflict_check(
  *     NOTE: USE THE MACRO VERSIONS OF THESE FUNCTIONS (in vm_param.h) FROM WITHIN THE KERNEL
  */
 
-/*
- *     vm_kernel_addrperm_external:
- *
- *     Used when exposing an address to userspace which is in the kernel's
- *     "heap". These addresses are not loaded from anywhere and are resultingly
- *     unslid. We apply a permutation value to obscure the address.
- */
-void
-vm_kernel_addrperm_external(
+static void
+vm_kernel_addrhash_internal(
        vm_offset_t addr,
-       vm_offset_t *perm_addr)
+       vm_offset_t *hash_addr,
+       uint64_t salt)
 {
+       assert(salt != 0);
+
        if (addr == 0) {
-               *perm_addr = 0;
+               *hash_addr = 0;
                return;
        }
 
-       *perm_addr = (addr + vm_kernel_addrperm_ext);
-       return;
+       if (VM_KERNEL_IS_SLID(addr)) {
+               *hash_addr = VM_KERNEL_UNSLIDE(addr);
+               return;
+       }
+
+       vm_offset_t sha_digest[SHA256_DIGEST_LENGTH/sizeof(vm_offset_t)];
+       SHA256_CTX sha_ctx;
+
+       SHA256_Init(&sha_ctx);
+       SHA256_Update(&sha_ctx, &salt, sizeof(salt));
+       SHA256_Update(&sha_ctx, &addr, sizeof(addr));
+       SHA256_Final(sha_digest, &sha_ctx);
+
+       *hash_addr = sha_digest[0];
+}
+
+void
+vm_kernel_addrhash_external(
+       vm_offset_t addr,
+       vm_offset_t *hash_addr)
+{
+       return vm_kernel_addrhash_internal(addr, hash_addr, vm_kernel_addrhash_salt_ext);
+}
+
+vm_offset_t
+vm_kernel_addrhash(vm_offset_t addr)
+{
+       vm_offset_t hash_addr;
+       vm_kernel_addrhash_internal(addr, &hash_addr, vm_kernel_addrhash_salt);
+       return hash_addr;
+}
+
+void
+vm_kernel_addrhide(
+       vm_offset_t addr,
+       vm_offset_t *hide_addr)
+{
+       *hide_addr = VM_KERNEL_ADDRHIDE(addr);
 }
 
 /*
+ *     vm_kernel_addrperm_external:
  *     vm_kernel_unslide_or_perm_external:
  *
- *     Use this macro when exposing an address to userspace that could come from
+ *     Use these macros when exposing an address to userspace that could come from
  *     either kernel text/data *or* the heap.
  */
 void
-vm_kernel_unslide_or_perm_external(
+vm_kernel_addrperm_external(
        vm_offset_t addr,
-       vm_offset_t *up_addr)
+       vm_offset_t *perm_addr)
 {
-       if (VM_KERNEL_IS_SLID(addr) || VM_KERNEL_IS_KEXT(addr) ||
-        VM_KERNEL_IS_PRELINKTEXT(addr) || VM_KERNEL_IS_PRELINKINFO(addr) ||
-        VM_KERNEL_IS_KEXT_LINKEDIT(addr)) {
-               *up_addr = addr - vm_kernel_slide;
-               return;
+       if (VM_KERNEL_IS_SLID(addr)) {
+               *perm_addr = VM_KERNEL_UNSLIDE(addr);
+       } else if (VM_KERNEL_ADDRESS(addr)) {
+               *perm_addr = addr + vm_kernel_addrperm_ext;
+       } else {
+               *perm_addr = addr;
        }
+}
 
+void
+vm_kernel_unslide_or_perm_external(
+       vm_offset_t addr,
+       vm_offset_t *up_addr)
+{
        vm_kernel_addrperm_external(addr, up_addr);
-       return;
 }
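
Note on the vm_kernel_addrhash additions above: slid addresses are simply unslid, while anything else is hashed as SHA-256(salt || addr) with the first word of the digest used as the obscured value; unlike the additive vm_kernel_addrperm_ext permutation (which vm_kernel_addrperm_external() still applies to heap addresses), the hash is not invertible without the boot-time salt. A user-space analogue of the construction (illustrative only; CommonCrypto stands in for the kernel's libkern SHA-256):

    #include <CommonCrypto/CommonDigest.h>
    #include <stdint.h>
    #include <string.h>

    static uintptr_t
    addr_hash(uintptr_t addr, uint64_t salt)
    {
            uint8_t       digest[CC_SHA256_DIGEST_LENGTH];
            CC_SHA256_CTX ctx;
            uintptr_t     out;

            CC_SHA256_Init(&ctx);
            CC_SHA256_Update(&ctx, &salt, sizeof(salt));  /* salt first, as above */
            CC_SHA256_Update(&ctx, &addr, sizeof(addr));
            CC_SHA256_Final(digest, &ctx);

            memcpy(&out, digest, sizeof(out));            /* keep the leading word */
            return out;
    }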