[apple/xnu.git] / osfmk / vm / vm_user.c (snapshot: xnu-7195.101.1)
index 886dbb6ffdc3fe970647bb6828577527aeaf1b90..2682dfaaf1b733d15953ffcf463aa4967420961f 100644
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
@@ -58,7 +58,7 @@
 /*
  *     File:   vm/vm_user.c
  *     Author: Avadis Tevanian, Jr., Michael Wayne Young
- * 
+ *
  *     User-exported virtual memory functions.
  */
 
@@ -90,9 +90,9 @@
 #include <vm_cpm.h>
 #include <mach/boolean.h>
 #include <mach/kern_return.h>
-#include <mach/mach_types.h>   /* to get vm_address_t */
+#include <mach/mach_types.h>    /* to get vm_address_t */
 #include <mach/memory_object.h>
-#include <mach/std_types.h>    /* to get pointer_t */
+#include <mach/std_types.h>     /* to get pointer_t */
 #include <mach/upl.h>
 #include <mach/vm_attributes.h>
 #include <mach/vm_param.h>
 
 #include <mach/host_priv_server.h>
 #include <mach/mach_vm_server.h>
+#include <mach/memory_entry_server.h>
 #include <mach/vm_map_server.h>
 
 #include <kern/host.h>
 #include <vm/vm_purgeable_internal.h>
 #include <vm/vm_init.h>
 
+#include <san/kasan.h>
+
+#include <libkern/OSDebug.h>
+#include <IOKit/IOBSD.h>
+
 vm_size_t        upl_offset_to_pagelist = 0;
 
-#if    VM_CPM
+#if     VM_CPM
 #include <vm/cpm.h>
-#endif /* VM_CPM */
-
-lck_grp_t      dynamic_pager_control_port_lock_group;
-decl_lck_mtx_data(, dynamic_pager_control_port_lock);
-ipc_port_t     dynamic_pager_control_port=NULL;
+#endif  /* VM_CPM */
 
 /*
  *     mach_vm_allocate allocates "zero fill" memory in the specfied
  *     map.
  */
 kern_return_t
-mach_vm_allocate(
-       vm_map_t                map,
-       mach_vm_offset_t        *addr,
-       mach_vm_size_t  size,
-       int                     flags)
+mach_vm_allocate_external(
+       vm_map_t                map,
+       mach_vm_offset_t        *addr,
+       mach_vm_size_t  size,
+       int                     flags)
+{
+       vm_tag_t tag;
+
+       VM_GET_FLAGS_ALIAS(flags, tag);
+       return mach_vm_allocate_kernel(map, addr, size, flags, tag);
+}
+
+kern_return_t
+mach_vm_allocate_kernel(
+       vm_map_t                map,
+       mach_vm_offset_t        *addr,
+       mach_vm_size_t  size,
+       int                     flags,
+       vm_tag_t    tag)
 {
        vm_map_offset_t map_addr;
-       vm_map_size_t   map_size;
-       kern_return_t   result;
-       boolean_t       anywhere;
+       vm_map_size_t   map_size;
+       kern_return_t   result;
+       boolean_t       anywhere;
 
        /* filter out any kernel-only flags */
-       if (flags & ~VM_FLAGS_USER_ALLOCATE)
+       if (flags & ~VM_FLAGS_USER_ALLOCATE) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       if (map == VM_MAP_NULL)
-               return(KERN_INVALID_ARGUMENT);
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
        if (size == 0) {
                *addr = 0;
-               return(KERN_SUCCESS);
+               return KERN_SUCCESS;
        }
 
        anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
@@ -168,60 +186,80 @@ mach_vm_allocate(
                 * memory would tend to confuse those applications.
                 */
                map_addr = vm_map_min(map);
-               if (map_addr == 0)
+               if (map_addr == 0) {
                        map_addr += VM_MAP_PAGE_SIZE(map);
-       } else
+               }
+       } else {
                map_addr = vm_map_trunc_page(*addr,
-                                            VM_MAP_PAGE_MASK(map));
+                   VM_MAP_PAGE_MASK(map));
+       }
        map_size = vm_map_round_page(size,
-                                    VM_MAP_PAGE_MASK(map));
+           VM_MAP_PAGE_MASK(map));
        if (map_size == 0) {
-         return(KERN_INVALID_ARGUMENT);
+               return KERN_INVALID_ARGUMENT;
        }
 
        result = vm_map_enter(
-                       map,
-                       &map_addr,
-                       map_size,
-                       (vm_map_offset_t)0,
-                       flags,
-                       VM_OBJECT_NULL,
-                       (vm_object_offset_t)0,
-                       FALSE,
-                       VM_PROT_DEFAULT,
-                       VM_PROT_ALL,
-                       VM_INHERIT_DEFAULT);
+               map,
+               &map_addr,
+               map_size,
+               (vm_map_offset_t)0,
+               flags,
+               VM_MAP_KERNEL_FLAGS_NONE,
+               tag,
+               VM_OBJECT_NULL,
+               (vm_object_offset_t)0,
+               FALSE,
+               VM_PROT_DEFAULT,
+               VM_PROT_ALL,
+               VM_INHERIT_DEFAULT);
 
        *addr = map_addr;
-       return(result);
+       return result;
 }
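Note: the user-visible mach_vm_allocate() entry point is split here into mach_vm_allocate_external(), which recovers a VM tag from the user-supplied flags via VM_GET_FLAGS_ALIAS(), and mach_vm_allocate_kernel(), which takes the tag explicitly and hands it to vm_map_enter(). A minimal user-space sketch of a tagged allocation, assuming the standard <mach/mach_vm.h> and <mach/vm_statistics.h> interfaces (illustrative only; the helper name is hypothetical and not part of this change):

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>

/* Allocate 'size' bytes of zero-filled memory in the current task,
 * tagged so tools such as vmmap can attribute the region. */
static kern_return_t
alloc_tagged(mach_vm_size_t size, mach_vm_address_t *out_addr)
{
    /* VM_MAKE_TAG() packs the tag into the flags word; the kernel side
     * (mach_vm_allocate_external) recovers it with VM_GET_FLAGS_ALIAS. */
    int flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_APPLICATION_SPECIFIC_1);

    *out_addr = 0;
    return mach_vm_allocate(mach_task_self(), out_addr, size, flags);
}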
 
 /*
- *     vm_allocate 
+ *     vm_allocate
  *     Legacy routine that allocates "zero fill" memory in the specfied
  *     map (which is limited to the same size as the kernel).
  */
 kern_return_t
-vm_allocate(
-       vm_map_t        map,
-       vm_offset_t     *addr,
-       vm_size_t       size,
-       int             flags)
+vm_allocate_external(
+       vm_map_t        map,
+       vm_offset_t     *addr,
+       vm_size_t       size,
+       int             flags)
+{
+       vm_tag_t tag;
+
+       VM_GET_FLAGS_ALIAS(flags, tag);
+       return vm_allocate_kernel(map, addr, size, flags, tag);
+}
+
+kern_return_t
+vm_allocate_kernel(
+       vm_map_t        map,
+       vm_offset_t     *addr,
+       vm_size_t       size,
+       int         flags,
+       vm_tag_t    tag)
 {
        vm_map_offset_t map_addr;
-       vm_map_size_t   map_size;
-       kern_return_t   result;
-       boolean_t       anywhere;
+       vm_map_size_t   map_size;
+       kern_return_t   result;
+       boolean_t       anywhere;
 
        /* filter out any kernel-only flags */
-       if (flags & ~VM_FLAGS_USER_ALLOCATE)
+       if (flags & ~VM_FLAGS_USER_ALLOCATE) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       if (map == VM_MAP_NULL)
-               return(KERN_INVALID_ARGUMENT);
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
        if (size == 0) {
                *addr = 0;
-               return(KERN_SUCCESS);
+               return KERN_SUCCESS;
        }
 
        anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
@@ -237,32 +275,42 @@ vm_allocate(
                 * memory would tend to confuse those applications.
                 */
                map_addr = vm_map_min(map);
-               if (map_addr == 0)
+               if (map_addr == 0) {
                        map_addr += VM_MAP_PAGE_SIZE(map);
-       } else
+               }
+       } else {
                map_addr = vm_map_trunc_page(*addr,
-                                            VM_MAP_PAGE_MASK(map));
+                   VM_MAP_PAGE_MASK(map));
+       }
        map_size = vm_map_round_page(size,
-                                    VM_MAP_PAGE_MASK(map));
+           VM_MAP_PAGE_MASK(map));
        if (map_size == 0) {
-         return(KERN_INVALID_ARGUMENT);
+               return KERN_INVALID_ARGUMENT;
        }
 
        result = vm_map_enter(
-                       map,
-                       &map_addr,
-                       map_size,
-                       (vm_map_offset_t)0,
-                       flags,
-                       VM_OBJECT_NULL,
-                       (vm_object_offset_t)0,
-                       FALSE,
-                       VM_PROT_DEFAULT,
-                       VM_PROT_ALL,
-                       VM_INHERIT_DEFAULT);
+               map,
+               &map_addr,
+               map_size,
+               (vm_map_offset_t)0,
+               flags,
+               VM_MAP_KERNEL_FLAGS_NONE,
+               tag,
+               VM_OBJECT_NULL,
+               (vm_object_offset_t)0,
+               FALSE,
+               VM_PROT_DEFAULT,
+               VM_PROT_ALL,
+               VM_INHERIT_DEFAULT);
+
+#if KASAN
+       if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
+               kasan_notify_address(map_addr, map_size);
+       }
+#endif
 
        *addr = CAST_DOWN(vm_offset_t, map_addr);
-       return(result);
+       return result;
 }
 
 /*
@@ -272,22 +320,24 @@ vm_allocate(
  */
 kern_return_t
 mach_vm_deallocate(
-       vm_map_t                map,
-       mach_vm_offset_t        start,
-       mach_vm_size_t  size)
+       vm_map_t                map,
+       mach_vm_offset_t        start,
+       mach_vm_size_t  size)
 {
-       if ((map == VM_MAP_NULL) || (start + size < start))
-               return(KERN_INVALID_ARGUMENT);
+       if ((map == VM_MAP_NULL) || (start + size < start)) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size == (mach_vm_offset_t) 0)
-               return(KERN_SUCCESS);
+       if (size == (mach_vm_offset_t) 0) {
+               return KERN_SUCCESS;
+       }
 
-       return(vm_map_remove(map,
-                            vm_map_trunc_page(start,
-                                              VM_MAP_PAGE_MASK(map)),
-                            vm_map_round_page(start+size,
-                                              VM_MAP_PAGE_MASK(map)),
-                            VM_MAP_NO_FLAGS));
+       return vm_map_remove(map,
+                  vm_map_trunc_page(start,
+                  VM_MAP_PAGE_MASK(map)),
+                  vm_map_round_page(start + size,
+                  VM_MAP_PAGE_MASK(map)),
+                  VM_MAP_REMOVE_NO_FLAGS);
 }
 
 /*
@@ -298,22 +348,24 @@ mach_vm_deallocate(
  */
 kern_return_t
 vm_deallocate(
-       vm_map_t                map,
-       vm_offset_t             start,
-       vm_size_t               size)
+       vm_map_t                map,
+       vm_offset_t             start,
+       vm_size_t               size)
 {
-       if ((map == VM_MAP_NULL) || (start + size < start))
-               return(KERN_INVALID_ARGUMENT);
+       if ((map == VM_MAP_NULL) || (start + size < start)) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size == (vm_offset_t) 0)
-               return(KERN_SUCCESS);
+       if (size == (vm_offset_t) 0) {
+               return KERN_SUCCESS;
+       }
 
-       return(vm_map_remove(map,
-                            vm_map_trunc_page(start,
-                                              VM_MAP_PAGE_MASK(map)),
-                            vm_map_round_page(start+size,
-                                              VM_MAP_PAGE_MASK(map)),
-                            VM_MAP_NO_FLAGS));
+       return vm_map_remove(map,
+                  vm_map_trunc_page(start,
+                  VM_MAP_PAGE_MASK(map)),
+                  vm_map_round_page(start + size,
+                  VM_MAP_PAGE_MASK(map)),
+                  VM_MAP_REMOVE_NO_FLAGS);
 }
 
 /*
@@ -323,24 +375,26 @@ vm_deallocate(
  */
 kern_return_t
 mach_vm_inherit(
-       vm_map_t                map,
-       mach_vm_offset_t        start,
-       mach_vm_size_t  size,
-       vm_inherit_t            new_inheritance)
+       vm_map_t                map,
+       mach_vm_offset_t        start,
+       mach_vm_size_t  size,
+       vm_inherit_t            new_inheritance)
 {
        if ((map == VM_MAP_NULL) || (start + size < start) ||
-           (new_inheritance > VM_INHERIT_LAST_VALID))
-                return(KERN_INVALID_ARGUMENT);
+           (new_inheritance > VM_INHERIT_LAST_VALID)) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size == 0)
+       if (size == 0) {
                return KERN_SUCCESS;
+       }
 
-       return(vm_map_inherit(map,
-                             vm_map_trunc_page(start,
-                                               VM_MAP_PAGE_MASK(map)),
-                             vm_map_round_page(start+size,
-                                               VM_MAP_PAGE_MASK(map)),
-                             new_inheritance));
+       return vm_map_inherit(map,
+                  vm_map_trunc_page(start,
+                  VM_MAP_PAGE_MASK(map)),
+                  vm_map_round_page(start + size,
+                  VM_MAP_PAGE_MASK(map)),
+                  new_inheritance);
 }
 
 /*
@@ -350,24 +404,26 @@ mach_vm_inherit(
  */
 kern_return_t
 vm_inherit(
-       vm_map_t                map,
-       vm_offset_t             start,
-       vm_size_t               size,
-       vm_inherit_t            new_inheritance)
+       vm_map_t                map,
+       vm_offset_t             start,
+       vm_size_t               size,
+       vm_inherit_t            new_inheritance)
 {
        if ((map == VM_MAP_NULL) || (start + size < start) ||
-           (new_inheritance > VM_INHERIT_LAST_VALID))
-                return(KERN_INVALID_ARGUMENT);
+           (new_inheritance > VM_INHERIT_LAST_VALID)) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size == 0)
+       if (size == 0) {
                return KERN_SUCCESS;
+       }
 
-       return(vm_map_inherit(map,
-                             vm_map_trunc_page(start,
-                                               VM_MAP_PAGE_MASK(map)),
-                             vm_map_round_page(start+size,
-                                               VM_MAP_PAGE_MASK(map)),
-                             new_inheritance));
+       return vm_map_inherit(map,
+                  vm_map_trunc_page(start,
+                  VM_MAP_PAGE_MASK(map)),
+                  vm_map_round_page(start + size,
+                  VM_MAP_PAGE_MASK(map)),
+                  new_inheritance);
 }
 
 /*
@@ -378,26 +434,28 @@ vm_inherit(
 
 kern_return_t
 mach_vm_protect(
-       vm_map_t                map,
-       mach_vm_offset_t        start,
-       mach_vm_size_t  size,
-       boolean_t               set_maximum,
-       vm_prot_t               new_protection)
+       vm_map_t                map,
+       mach_vm_offset_t        start,
+       mach_vm_size_t  size,
+       boolean_t               set_maximum,
+       vm_prot_t               new_protection)
 {
        if ((map == VM_MAP_NULL) || (start + size < start) ||
-           (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
-               return(KERN_INVALID_ARGUMENT);
+           (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size == 0)
+       if (size == 0) {
                return KERN_SUCCESS;
+       }
 
-       return(vm_map_protect(map,
-                             vm_map_trunc_page(start,
-                                               VM_MAP_PAGE_MASK(map)),
-                             vm_map_round_page(start+size,
-                                               VM_MAP_PAGE_MASK(map)),
-                             new_protection,
-                             set_maximum));
+       return vm_map_protect(map,
+                  vm_map_trunc_page(start,
+                  VM_MAP_PAGE_MASK(map)),
+                  vm_map_round_page(start + size,
+                  VM_MAP_PAGE_MASK(map)),
+                  new_protection,
+                  set_maximum);
 }
 
 /*
@@ -409,26 +467,28 @@ mach_vm_protect(
 
 kern_return_t
 vm_protect(
-       vm_map_t                map,
-       vm_offset_t             start,
-       vm_size_t               size,
-       boolean_t               set_maximum,
-       vm_prot_t               new_protection)
+       vm_map_t                map,
+       vm_offset_t             start,
+       vm_size_t               size,
+       boolean_t               set_maximum,
+       vm_prot_t               new_protection)
 {
        if ((map == VM_MAP_NULL) || (start + size < start) ||
-           (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
-               return(KERN_INVALID_ARGUMENT);
+           (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size == 0)
+       if (size == 0) {
                return KERN_SUCCESS;
+       }
 
-       return(vm_map_protect(map,
-                             vm_map_trunc_page(start,
-                                               VM_MAP_PAGE_MASK(map)),
-                             vm_map_round_page(start+size,
-                                               VM_MAP_PAGE_MASK(map)),
-                             new_protection,
-                             set_maximum));
+       return vm_map_protect(map,
+                  vm_map_trunc_page(start,
+                  VM_MAP_PAGE_MASK(map)),
+                  vm_map_round_page(start + size,
+                  VM_MAP_PAGE_MASK(map)),
+                  new_protection,
+                  set_maximum);
 }
 
 /*
@@ -438,24 +498,26 @@ vm_protect(
  */
 kern_return_t
 mach_vm_machine_attribute(
-       vm_map_t                        map,
-       mach_vm_address_t               addr,
-       mach_vm_size_t          size,
-       vm_machine_attribute_t  attribute,
-       vm_machine_attribute_val_t* value)              /* IN/OUT */
+       vm_map_t                        map,
+       mach_vm_address_t               addr,
+       mach_vm_size_t          size,
+       vm_machine_attribute_t  attribute,
+       vm_machine_attribute_val_t* value)              /* IN/OUT */
 {
-       if ((map == VM_MAP_NULL) || (addr + size < addr))
-               return(KERN_INVALID_ARGUMENT);
+       if ((map == VM_MAP_NULL) || (addr + size < addr)) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size == 0)
+       if (size == 0) {
                return KERN_SUCCESS;
+       }
 
        return vm_map_machine_attribute(
-               map, 
+               map,
                vm_map_trunc_page(addr,
-                                 VM_MAP_PAGE_MASK(map)),
-               vm_map_round_page(addr+size,
-                                 VM_MAP_PAGE_MASK(map)),
+               VM_MAP_PAGE_MASK(map)),
+               vm_map_round_page(addr + size,
+               VM_MAP_PAGE_MASK(map)),
                attribute,
                value);
 }
@@ -468,24 +530,26 @@ mach_vm_machine_attribute(
  */
 kern_return_t
 vm_machine_attribute(
-       vm_map_t        map,
-       vm_address_t    addr,
-       vm_size_t       size,
-       vm_machine_attribute_t  attribute,
-       vm_machine_attribute_val_t* value)              /* IN/OUT */
+       vm_map_t        map,
+       vm_address_t    addr,
+       vm_size_t       size,
+       vm_machine_attribute_t  attribute,
+       vm_machine_attribute_val_t* value)              /* IN/OUT */
 {
-       if ((map == VM_MAP_NULL) || (addr + size < addr))
-               return(KERN_INVALID_ARGUMENT);
+       if ((map == VM_MAP_NULL) || (addr + size < addr)) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size == 0)
+       if (size == 0) {
                return KERN_SUCCESS;
+       }
 
        return vm_map_machine_attribute(
-               map, 
+               map,
                vm_map_trunc_page(addr,
-                                 VM_MAP_PAGE_MASK(map)),
-               vm_map_round_page(addr+size,
-                                 VM_MAP_PAGE_MASK(map)),
+               VM_MAP_PAGE_MASK(map)),
+               vm_map_round_page(addr + size,
+               VM_MAP_PAGE_MASK(map)),
                attribute,
                value);
 }
@@ -498,47 +562,49 @@ vm_machine_attribute(
  * the IPC implementation as part of receiving the reply to this call.
  * If IPC isn't used, the caller must deal with the vm_map_copy_t object
  * that gets returned.
- * 
+ *
  * JMM - because of mach_msg_type_number_t, this call is limited to a
  * single 4GB region at this time.
  *
  */
 kern_return_t
 mach_vm_read(
-       vm_map_t                map,
-       mach_vm_address_t       addr,
-       mach_vm_size_t  size,
-       pointer_t               *data,
-       mach_msg_type_number_t  *data_size)
+       vm_map_t                map,
+       mach_vm_address_t       addr,
+       mach_vm_size_t  size,
+       pointer_t               *data,
+       mach_msg_type_number_t  *data_size)
 {
-       kern_return_t   error;
-       vm_map_copy_t   ipc_address;
+       kern_return_t   error;
+       vm_map_copy_t   ipc_address;
 
-       if (map == VM_MAP_NULL)
-               return(KERN_INVALID_ARGUMENT);
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if ((mach_msg_type_number_t) size != size)
+       if ((mach_msg_type_number_t) size != size) {
                return KERN_INVALID_ARGUMENT;
-       
+       }
+
        error = vm_map_copyin(map,
-                       (vm_map_address_t)addr,
-                       (vm_map_size_t)size,
-                       FALSE,  /* src_destroy */
-                       &ipc_address);
+           (vm_map_address_t)addr,
+           (vm_map_size_t)size,
+           FALSE,              /* src_destroy */
+           &ipc_address);
 
        if (KERN_SUCCESS == error) {
                *data = (pointer_t) ipc_address;
                *data_size = (mach_msg_type_number_t) size;
                assert(*data_size == size);
        }
-       return(error);
+       return error;
 }
 
 /*
  * vm_read -
  * Read/copy a range from one address space and return it to the caller.
  * Limited addressability (same range limits as for the native kernel map).
- * 
+ *
  * It is assumed that the address for the returned memory is selected by
  * the IPC implementation as part of receiving the reply to this call.
  * If IPC isn't used, the caller must deal with the vm_map_copy_t object
@@ -546,19 +612,21 @@ mach_vm_read(
  */
 kern_return_t
 vm_read(
-       vm_map_t                map,
-       vm_address_t            addr,
-       vm_size_t               size,
-       pointer_t               *data,
-       mach_msg_type_number_t  *data_size)
+       vm_map_t                map,
+       vm_address_t            addr,
+       vm_size_t               size,
+       pointer_t               *data,
+       mach_msg_type_number_t  *data_size)
 {
-       kern_return_t   error;
-       vm_map_copy_t   ipc_address;
+       kern_return_t   error;
+       vm_map_copy_t   ipc_address;
 
-       if (map == VM_MAP_NULL)
-               return(KERN_INVALID_ARGUMENT);
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size > (unsigned)(mach_msg_type_number_t) -1) {
+       mach_msg_type_number_t dsize;
+       if (os_convert_overflow(size, &dsize)) {
                /*
                 * The kernel could handle a 64-bit "size" value, but
                 * it could not return the size of the data in "*data_size"
@@ -569,20 +637,20 @@ vm_read(
        }
 
        error = vm_map_copyin(map,
-                       (vm_map_address_t)addr,
-                       (vm_map_size_t)size,
-                       FALSE,  /* src_destroy */
-                       &ipc_address);
+           (vm_map_address_t)addr,
+           (vm_map_size_t)size,
+           FALSE,              /* src_destroy */
+           &ipc_address);
 
        if (KERN_SUCCESS == error) {
                *data = (pointer_t) ipc_address;
-               *data_size = (mach_msg_type_number_t) size;
+               *data_size = dsize;
                assert(*data_size == size);
        }
-       return(error);
+       return error;
 }
 
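As the JMM comment above notes, the out-of-line reply limits a single mach_vm_read()/vm_read() to what a mach_msg_type_number_t can express (about 4GB), which is why vm_read() now rejects sizes that do not convert cleanly via os_convert_overflow(). A minimal user-space sketch of the out-of-line read path, assuming the standard <mach/mach_vm.h> interface (illustrative only; the helper name is hypothetical):

#include <mach/mach.h>
#include <mach/mach_vm.h>

/* Read 'size' bytes at 'addr' in 'task'. The data comes back out of line:
 * MIG maps the kernel's vm_map_copy_t into our address space and the
 * caller owns (and must deallocate) that mapping. */
static kern_return_t
read_remote(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
{
    vm_offset_t data = 0;              /* pointer_t on the wire */
    mach_msg_type_number_t count = 0;  /* capped by the 32-bit type */
    kern_return_t kr;

    kr = mach_vm_read(task, addr, size, &data, &count);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    /* ... use the bytes at 'data' ... */
    (void) mach_vm_deallocate(mach_task_self(),
        (mach_vm_address_t)data, (mach_vm_size_t)count);
    return KERN_SUCCESS;
}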
-/* 
+/*
  * mach_vm_read_list -
  * Read/copy a list of address ranges from specified map.
  *
@@ -592,37 +660,38 @@ vm_read(
  */
 kern_return_t
 mach_vm_read_list(
-       vm_map_t                        map,
-       mach_vm_read_entry_t            data_list,
-       natural_t                       count)
+       vm_map_t                        map,
+       mach_vm_read_entry_t            data_list,
+       natural_t                       count)
 {
-       mach_msg_type_number_t  i;
-       kern_return_t   error;
-       vm_map_copy_t   copy;
+       mach_msg_type_number_t  i;
+       kern_return_t   error;
+       vm_map_copy_t   copy;
 
        if (map == VM_MAP_NULL ||
-           count > VM_MAP_ENTRY_MAX)
-               return(KERN_INVALID_ARGUMENT);
+           count > VM_MAP_ENTRY_MAX) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
        error = KERN_SUCCESS;
-       for(i=0; i<count; i++) {
+       for (i = 0; i < count; i++) {
                vm_map_address_t map_addr;
                vm_map_size_t map_size;
 
                map_addr = (vm_map_address_t)(data_list[i].address);
                map_size = (vm_map_size_t)(data_list[i].size);
 
-               if(map_size != 0) {
+               if (map_size != 0) {
                        error = vm_map_copyin(map,
-                                       map_addr,
-                                       map_size,
-                                       FALSE,  /* src_destroy */
-                                       &copy);
+                           map_addr,
+                           map_size,
+                           FALSE,              /* src_destroy */
+                           &copy);
                        if (KERN_SUCCESS == error) {
                                error = vm_map_copyout(
-                                               current_task()->map, 
-                                               &map_addr,
-                                               copy);
+                                       current_task()->map,
+                                       &map_addr,
+                                       copy);
                                if (KERN_SUCCESS == error) {
                                        data_list[i].address = map_addr;
                                        continue;
@@ -633,10 +702,10 @@ mach_vm_read_list(
                data_list[i].address = (mach_vm_address_t)0;
                data_list[i].size = (mach_vm_size_t)0;
        }
-       return(error);
+       return error;
 }
 
-/* 
+/*
  * vm_read_list -
  * Read/copy a list of address ranges from specified map.
  *
@@ -657,39 +726,40 @@ mach_vm_read_list(
 
 kern_return_t
 vm_read_list(
-       vm_map_t                map,
-       vm_read_entry_t data_list,
-       natural_t               count)
+       vm_map_t                map,
+       vm_read_entry_t data_list,
+       natural_t               count)
 {
 {
-       mach_msg_type_number_t  i;
-       kern_return_t   error;
-       vm_map_copy_t   copy;
+       mach_msg_type_number_t  i;
+       kern_return_t   error;
+       vm_map_copy_t   copy;
 
        if (map == VM_MAP_NULL ||
-           count > VM_MAP_ENTRY_MAX)
-               return(KERN_INVALID_ARGUMENT);
+           count > VM_MAP_ENTRY_MAX) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
        error = KERN_SUCCESS;
-       for(i=0; i<count; i++) {
+       for (i = 0; i < count; i++) {
                vm_map_address_t map_addr;
                vm_map_size_t map_size;
 
                map_addr = (vm_map_address_t)(data_list[i].address);
                map_size = (vm_map_size_t)(data_list[i].size);
 
-               if(map_size != 0) {
+               if (map_size != 0) {
                        error = vm_map_copyin(map,
-                                       map_addr,
-                                       map_size,
-                                       FALSE,  /* src_destroy */
-                                       &copy);
+                           map_addr,
+                           map_size,
+                           FALSE,              /* src_destroy */
+                           &copy);
                        if (KERN_SUCCESS == error) {
-                               error = vm_map_copyout(current_task()->map, 
-                                               &map_addr,
-                                               copy);
+                               error = vm_map_copyout(current_task()->map,
+                                   &map_addr,
+                                   copy);
                                if (KERN_SUCCESS == error) {
                                        data_list[i].address =
-                                               CAST_DOWN(vm_offset_t, map_addr);
+                                           CAST_DOWN(vm_offset_t, map_addr);
                                        continue;
                                }
                                vm_map_copy_discard(copy);
@@ -698,14 +768,14 @@ vm_read_list(
                data_list[i].address = (mach_vm_address_t)0;
                data_list[i].size = (mach_vm_size_t)0;
        }
-       return(error);
+       return error;
 }
 
 /*
  * mach_vm_read_overwrite -
  * Overwrite a range of the current map with data from the specified
  * map/address range.
- * 
+ *
  * In making an assumption that the current thread is local, it is
  * no longer cluster-safe without a fully supportive local proxy
  * thread/task (but we don't support cluster's anymore so this is moot).
@@ -713,39 +783,44 @@ vm_read_list(
 
 kern_return_t
 mach_vm_read_overwrite(
-       vm_map_t                map,
-       mach_vm_address_t       address,
-       mach_vm_size_t  size,
-       mach_vm_address_t       data,
-       mach_vm_size_t  *data_size)
+       vm_map_t                map,
+       mach_vm_address_t       address,
+       mach_vm_size_t  size,
+       mach_vm_address_t       data,
+       mach_vm_size_t  *data_size)
 {
-       kern_return_t   error;
-       vm_map_copy_t   copy;
+       kern_return_t   error;
+       vm_map_copy_t   copy;
 
-       if (map == VM_MAP_NULL)
-               return(KERN_INVALID_ARGUMENT);
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
        error = vm_map_copyin(map, (vm_map_address_t)address,
-                               (vm_map_size_t)size, FALSE, &copy);
+           (vm_map_size_t)size, FALSE, &copy);
 
        if (KERN_SUCCESS == error) {
+               if (copy) {
+                       assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
+               }
+
                error = vm_map_copy_overwrite(current_thread()->map,
-                                       (vm_map_address_t)data, 
-                                       copy, FALSE);
+                   (vm_map_address_t)data,
+                   copy, (vm_map_size_t) size, FALSE);
                if (KERN_SUCCESS == error) {
                        *data_size = size;
                        return error;
                }
                vm_map_copy_discard(copy);
        }
-       return(error);
+       return error;
 }
 
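mach_vm_read_overwrite() above copies into an address the caller already owns, so it avoids the out-of-line reply (and the mach_msg_type_number_t size cap) entirely; the added assertf() merely checks that vm_map_copyin() produced a copy object of the requested size before vm_map_copy_overwrite() consumes it with its now-explicit size argument. A hedged user-space sketch, assuming the standard <mach/mach_vm.h> prototype (illustrative only; the helper name is hypothetical):

#include <stdint.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

/* Copy 'size' bytes at 'addr' in 'task' into a buffer supplied by the
 * caller; no new mapping is created in the calling task. */
static kern_return_t
read_into_buffer(task_t task, mach_vm_address_t addr,
    void *buf, mach_vm_size_t size, mach_vm_size_t *copied)
{
    return mach_vm_read_overwrite(task, addr, size,
               (mach_vm_address_t)(uintptr_t)buf, copied);
}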
 /*
  * vm_read_overwrite -
  * Overwrite a range of the current map with data from the specified
  * map/address range.
- * 
+ *
  * This routine adds the additional limitation that the source and
  * destination ranges must be describable with vm_address_t values
  * (i.e. the same size address spaces as the kernel, or at least the
@@ -755,32 +830,37 @@ mach_vm_read_overwrite(
 
 kern_return_t
 vm_read_overwrite(
-       vm_map_t        map,
-       vm_address_t    address,
-       vm_size_t       size,
-       vm_address_t    data,
-       vm_size_t       *data_size)
+       vm_map_t        map,
+       vm_address_t    address,
+       vm_size_t       size,
+       vm_address_t    data,
+       vm_size_t       *data_size)
 {
-       kern_return_t   error;
-       vm_map_copy_t   copy;
+       kern_return_t   error;
+       vm_map_copy_t   copy;
 
-       if (map == VM_MAP_NULL)
-               return(KERN_INVALID_ARGUMENT);
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
        error = vm_map_copyin(map, (vm_map_address_t)address,
-                               (vm_map_size_t)size, FALSE, &copy);
+           (vm_map_size_t)size, FALSE, &copy);
 
        if (KERN_SUCCESS == error) {
+               if (copy) {
+                       assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
+               }
+
                error = vm_map_copy_overwrite(current_thread()->map,
-                                       (vm_map_address_t)data, 
-                                       copy, FALSE);
+                   (vm_map_address_t)data,
+                   copy, (vm_map_size_t) size, FALSE);
                if (KERN_SUCCESS == error) {
                        *data_size = size;
                        return error;
                }
                vm_map_copy_discard(copy);
        }
-       return(error);
+       return error;
 }
 
 
@@ -791,16 +871,17 @@ vm_read_overwrite(
  */
 kern_return_t
 mach_vm_write(
-       vm_map_t                        map,
-       mach_vm_address_t               address,
-       pointer_t                       data,
-       __unused mach_msg_type_number_t size)
+       vm_map_t                        map,
+       mach_vm_address_t               address,
+       pointer_t                       data,
+       mach_msg_type_number_t          size)
 {
-       if (map == VM_MAP_NULL)
+       if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        return vm_map_copy_overwrite(map, (vm_map_address_t)address,
-               (vm_map_copy_t) data, FALSE /* interruptible XXX */);
+                  (vm_map_copy_t) data, size, FALSE /* interruptible XXX */);
 }
 
 /*
@@ -815,16 +896,17 @@ mach_vm_write(
  */
 kern_return_t
 vm_write(
-       vm_map_t                        map,
-       vm_address_t                    address,
-       pointer_t                       data,
-       __unused mach_msg_type_number_t size)
+       vm_map_t                        map,
+       vm_address_t                    address,
+       pointer_t                       data,
+       mach_msg_type_number_t          size)
 {
-       if (map == VM_MAP_NULL)
+       if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        return vm_map_copy_overwrite(map, (vm_map_address_t)address,
-               (vm_map_copy_t) data, FALSE /* interruptible XXX */);
+                  (vm_map_copy_t) data, size, FALSE /* interruptible XXX */);
 }
 
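In mach_vm_write() above and vm_write() below, the MIG-supplied byte count was previously marked __unused; it is now forwarded so vm_map_copy_overwrite() can check it against the incoming vm_map_copy_t. A minimal user-space sketch of the write path, assuming the standard <mach/mach_vm.h> prototype (illustrative only; the helper name is hypothetical):

#include <stdint.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

/* Overwrite 'len' bytes at 'addr' in 'task' with the contents of 'src'.
 * MIG ships 'src' out of line as a vm_map_copy_t plus its byte count,
 * which the kernel now passes through to vm_map_copy_overwrite(). */
static kern_return_t
write_remote(task_t task, mach_vm_address_t addr,
    const void *src, mach_msg_type_number_t len)
{
    return mach_vm_write(task, addr, (vm_offset_t)(uintptr_t)src, len);
}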
 /*
@@ -835,54 +917,66 @@ vm_write(
  */
 kern_return_t
 mach_vm_copy(
-       vm_map_t                map,
-       mach_vm_address_t       source_address,
-       mach_vm_size_t  size,
-       mach_vm_address_t       dest_address)
+       vm_map_t                map,
+       mach_vm_address_t       source_address,
+       mach_vm_size_t  size,
+       mach_vm_address_t       dest_address)
 {
        vm_map_copy_t copy;
        kern_return_t kr;
 
-       if (map == VM_MAP_NULL)
+       if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        kr = vm_map_copyin(map, (vm_map_address_t)source_address,
-                          (vm_map_size_t)size, FALSE, &copy);
+           (vm_map_size_t)size, FALSE, &copy);
 
        if (KERN_SUCCESS == kr) {
+               if (copy) {
+                       assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
+               }
+
                kr = vm_map_copy_overwrite(map,
-                               (vm_map_address_t)dest_address,
-                               copy, FALSE /* interruptible XXX */);
+                   (vm_map_address_t)dest_address,
+                   copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);
 
-               if (KERN_SUCCESS != kr)
+               if (KERN_SUCCESS != kr) {
                        vm_map_copy_discard(copy);
+               }
        }
        return kr;
 }
 
 kern_return_t
 vm_copy(
-       vm_map_t        map,
-       vm_address_t    source_address,
-       vm_size_t       size,
-       vm_address_t    dest_address)
+       vm_map_t        map,
+       vm_address_t    source_address,
+       vm_size_t       size,
+       vm_address_t    dest_address)
 {
        vm_map_copy_t copy;
        kern_return_t kr;
 
-       if (map == VM_MAP_NULL)
+       if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        kr = vm_map_copyin(map, (vm_map_address_t)source_address,
-                          (vm_map_size_t)size, FALSE, &copy);
+           (vm_map_size_t)size, FALSE, &copy);
 
        if (KERN_SUCCESS == kr) {
+               if (copy) {
+                       assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
+               }
+
                kr = vm_map_copy_overwrite(map,
-                               (vm_map_address_t)dest_address,
-                               copy, FALSE /* interruptible XXX */);
+                   (vm_map_address_t)dest_address,
+                   copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);
 
-               if (KERN_SUCCESS != kr)
+               if (KERN_SUCCESS != kr) {
                        vm_map_copy_discard(copy);
+               }
        }
        return kr;
 }
@@ -899,39 +993,74 @@ vm_copy(
  *
  */
 kern_return_t
-mach_vm_map(
-       vm_map_t                target_map,
-       mach_vm_offset_t        *address,
-       mach_vm_size_t  initial_size,
-       mach_vm_offset_t        mask,
-       int                     flags,
-       ipc_port_t              port,
-       vm_object_offset_t      offset,
-       boolean_t               copy,
-       vm_prot_t               cur_protection,
-       vm_prot_t               max_protection,
-       vm_inherit_t            inheritance)
-{
-       kern_return_t           kr;
-       vm_map_offset_t         vmmaddr;
+mach_vm_map_external(
+       vm_map_t                target_map,
+       mach_vm_offset_t        *address,
+       mach_vm_size_t          initial_size,
+       mach_vm_offset_t        mask,
+       int                     flags,
+       ipc_port_t              port,
+       vm_object_offset_t      offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance)
+{
+       vm_tag_t tag;
+
+       VM_GET_FLAGS_ALIAS(flags, tag);
+       return mach_vm_map_kernel(target_map, address, initial_size, mask,
+                  flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
+                  port, offset, copy,
+                  cur_protection, max_protection,
+                  inheritance);
+}
+
+kern_return_t
+mach_vm_map_kernel(
+       vm_map_t                target_map,
+       mach_vm_offset_t        *address,
+       mach_vm_size_t  initial_size,
+       mach_vm_offset_t        mask,
+       int                     flags,
+       vm_map_kernel_flags_t   vmk_flags,
+       vm_tag_t                tag,
+       ipc_port_t              port,
+       vm_object_offset_t      offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance)
+{
+       kern_return_t           kr;
+       vm_map_offset_t         vmmaddr;
 
        vmmaddr = (vm_map_offset_t) *address;
 
        /* filter out any kernel-only flags */
 
        vmmaddr = (vm_map_offset_t) *address;
 
        /* filter out any kernel-only flags */
-       if (flags & ~VM_FLAGS_USER_MAP)
+       if (flags & ~VM_FLAGS_USER_MAP) {
                return KERN_INVALID_ARGUMENT;
                return KERN_INVALID_ARGUMENT;
+       }
 
        kr = vm_map_enter_mem_object(target_map,
 
        kr = vm_map_enter_mem_object(target_map,
-                                      &vmmaddr,
-                                      initial_size,
-                                      mask,
-                                      flags,
-                                      port,
-                                      offset,
-                                      copy,
-                                      cur_protection,
-                                      max_protection,
-                                      inheritance);
+           &vmmaddr,
+           initial_size,
+           mask,
+           flags,
+           vmk_flags,
+           tag,
+           port,
+           offset,
+           copy,
+           cur_protection,
+           max_protection,
+           inheritance);
+
+#if KASAN
+       if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
+               kasan_notify_address(vmmaddr, initial_size);
+       }
+#endif
 
        *address = vmmaddr;
        return kr;
 
 
 /* legacy interface */
 kern_return_t
 
 /* legacy interface */
 kern_return_t
-vm_map_64(
-       vm_map_t                target_map,
-       vm_offset_t             *address,
-       vm_size_t               size,
-       vm_offset_t             mask,
-       int                     flags,
-       ipc_port_t              port,
-       vm_object_offset_t      offset,
-       boolean_t               copy,
-       vm_prot_t               cur_protection,
-       vm_prot_t               max_protection,
-       vm_inherit_t            inheritance)
+vm_map_64_external(
+       vm_map_t                target_map,
+       vm_offset_t             *address,
+       vm_size_t               size,
+       vm_offset_t             mask,
+       int                     flags,
+       ipc_port_t              port,
+       vm_object_offset_t      offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance)
+{
+       vm_tag_t tag;
+
+       VM_GET_FLAGS_ALIAS(flags, tag);
+       return vm_map_64_kernel(target_map, address, size, mask,
+                  flags, VM_MAP_KERNEL_FLAGS_NONE,
+                  tag, port, offset, copy,
+                  cur_protection, max_protection,
+                  inheritance);
+}
+
+kern_return_t
+vm_map_64_kernel(
+       vm_map_t                target_map,
+       vm_offset_t             *address,
+       vm_size_t               size,
+       vm_offset_t             mask,
+       int                     flags,
+       vm_map_kernel_flags_t   vmk_flags,
+       vm_tag_t                tag,
+       ipc_port_t              port,
+       vm_object_offset_t      offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance)
 {
        mach_vm_address_t map_addr;
        mach_vm_size_t map_size;
@@ -962,27 +1117,53 @@ vm_map_64(
        map_size = (mach_vm_size_t)size;
        map_mask = (mach_vm_offset_t)mask;
 
-       kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
-                        port, offset, copy, 
-                        cur_protection, max_protection, inheritance);
+       kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
+           flags, vmk_flags, tag,
+           port, offset, copy,
+           cur_protection, max_protection, inheritance);
        *address = CAST_DOWN(vm_offset_t, map_addr);
        return kr;
 }
 
 /* temporary, until world build */
 kern_return_t
-vm_map(
-       vm_map_t                target_map,
-       vm_offset_t             *address,
-       vm_size_t               size,
-       vm_offset_t             mask,
-       int                     flags,
-       ipc_port_t              port,
-       vm_offset_t             offset,
-       boolean_t               copy,
-       vm_prot_t               cur_protection,
-       vm_prot_t               max_protection,
-       vm_inherit_t            inheritance)
+vm_map_external(
+       vm_map_t                target_map,
+       vm_offset_t             *address,
+       vm_size_t               size,
+       vm_offset_t             mask,
+       int                     flags,
+       ipc_port_t              port,
+       vm_offset_t             offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance)
+{
+       vm_tag_t tag;
+
+       VM_GET_FLAGS_ALIAS(flags, tag);
+       return vm_map_kernel(target_map, address, size, mask,
+                  flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
+                  port, offset, copy,
+                  cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+vm_map_kernel(
+       vm_map_t                target_map,
+       vm_offset_t             *address,
+       vm_size_t               size,
+       vm_offset_t             mask,
+       int                     flags,
+       vm_map_kernel_flags_t   vmk_flags,
+       vm_tag_t                tag,
+       ipc_port_t              port,
+       vm_offset_t             offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance)
 {
        mach_vm_address_t map_addr;
        mach_vm_size_t map_size;
@@ -995,13 +1176,99 @@ vm_map(
        map_mask = (mach_vm_offset_t)mask;
        obj_offset = (vm_object_offset_t)offset;
 
-       kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
-                        port, obj_offset, copy, 
-                        cur_protection, max_protection, inheritance);
+       kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
+           flags, vmk_flags, tag,
+           port, obj_offset, copy,
+           cur_protection, max_protection, inheritance);
        *address = CAST_DOWN(vm_offset_t, map_addr);
        return kr;
 }
 
+/*
+ * mach_vm_remap_new -
+ * Behaves like mach_vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
+ * and {cur,max}_protection are in/out.
+ */
+kern_return_t
+mach_vm_remap_new_external(
+       vm_map_t                target_map,
+       mach_vm_offset_t        *address,
+       mach_vm_size_t  size,
+       mach_vm_offset_t        mask,
+       int                     flags,
+       mach_port_t             src_tport,
+       mach_vm_offset_t        memory_address,
+       boolean_t               copy,
+       vm_prot_t               *cur_protection,   /* IN/OUT */
+       vm_prot_t               *max_protection,   /* IN/OUT */
+       vm_inherit_t            inheritance)
+{
+       vm_tag_t tag;
+       vm_map_offset_t         map_addr;
+       kern_return_t           kr;
+       vm_map_t src_map;
+
+       flags |= VM_FLAGS_RETURN_DATA_ADDR;
+       VM_GET_FLAGS_ALIAS(flags, tag);
+
+       /* filter out any kernel-only flags */
+       if (flags & ~VM_FLAGS_USER_REMAP) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if (target_map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if ((*cur_protection & ~VM_PROT_ALL) ||
+           (*max_protection & ~VM_PROT_ALL) ||
+           (*cur_protection & *max_protection) != *cur_protection) {
+               return KERN_INVALID_ARGUMENT;
+       }
+       if ((*max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
+           (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
+               /*
+                * XXX FBDP TODO
+                * enforce target's "wx" policies
+                */
+               return KERN_PROTECTION_FAILURE;
+       }
+
+       if (copy || *max_protection == VM_PROT_READ || *max_protection == VM_PROT_NONE) {
+               src_map = convert_port_to_map_read(src_tport);
+       } else {
+               src_map = convert_port_to_map(src_tport);
+       }
+
+       if (src_map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       map_addr = (vm_map_offset_t)*address;
+
+       kr = vm_map_remap(target_map,
+           &map_addr,
+           size,
+           mask,
+           flags,
+           VM_MAP_KERNEL_FLAGS_NONE,
+           tag,
+           src_map,
+           memory_address,
+           copy,
+           cur_protection,    /* IN/OUT */
+           max_protection,    /* IN/OUT */
+           inheritance);
+
+       *address = map_addr;
+       vm_map_deallocate(src_map);
+
+       if (kr == KERN_SUCCESS) {
+               ipc_port_release_send(src_tport);  /* consume on success */
+       }
+       return kr;
+}
+
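For orientation, a minimal user-space sketch of the in/out protection contract above, assuming the MIG-generated mach_vm_remap_new() wrapper from <mach/mach_vm.h>; the helper name and the constants chosen here are illustrative only, not part of this change:

#include <mach/mach.h>
#include <mach/mach_vm.h>

/* Illustrative helper: remap one page of our own map somewhere else, read-only. */
static kern_return_t
remap_page_readonly(mach_vm_address_t src, mach_vm_address_t *dst)
{
	vm_prot_t cur = VM_PROT_READ;   /* IN: requested current protection */
	vm_prot_t max = VM_PROT_READ;   /* IN: requested maximum protection */
	kern_return_t kr;

	*dst = 0;
	/* VM_FLAGS_RETURN_DATA_ADDR is implied by the *_new variant. */
	kr = mach_vm_remap_new(mach_task_self(), dst, vm_page_size, 0,
	    VM_FLAGS_ANYWHERE, mach_task_self(), src, FALSE /* copy */,
	    &cur, &max, VM_INHERIT_NONE);
	/* On success, cur and max are updated to the protections actually granted. */
	return kr;
}
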
 /*
  * mach_vm_remap -
  * Remap a range of memory from one task into another,
@@ -1009,48 +1276,161 @@ vm_map(
  * over top of itself (with altered permissions and/or
  * as an in-place copy of itself).
  */
+kern_return_t
+mach_vm_remap_external(
+       vm_map_t                target_map,
+       mach_vm_offset_t        *address,
+       mach_vm_size_t  size,
+       mach_vm_offset_t        mask,
+       int                     flags,
+       vm_map_t                src_map,
+       mach_vm_offset_t        memory_address,
+       boolean_t               copy,
+       vm_prot_t               *cur_protection,    /* OUT */
+       vm_prot_t               *max_protection,    /* OUT */
+       vm_inherit_t            inheritance)
+{
+       vm_tag_t tag;
+       VM_GET_FLAGS_ALIAS(flags, tag);
+
+       return mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address,
+                  copy, cur_protection, max_protection, inheritance);
+}
 
 kern_return_t
-mach_vm_remap(
-       vm_map_t                target_map,
-       mach_vm_offset_t        *address,
-       mach_vm_size_t  size,
-       mach_vm_offset_t        mask,
-       int                     flags,
-       vm_map_t                src_map,
-       mach_vm_offset_t        memory_address,
-       boolean_t               copy,
-       vm_prot_t               *cur_protection,
-       vm_prot_t               *max_protection,
-       vm_inherit_t            inheritance)
+mach_vm_remap_kernel(
+       vm_map_t                target_map,
+       mach_vm_offset_t        *address,
+       mach_vm_size_t  size,
+       mach_vm_offset_t        mask,
+       int                     flags,
+       vm_tag_t                tag,
+       vm_map_t                src_map,
+       mach_vm_offset_t        memory_address,
+       boolean_t               copy,
+       vm_prot_t               *cur_protection,   /* OUT */
+       vm_prot_t               *max_protection,   /* OUT */
+       vm_inherit_t            inheritance)
 {
-       vm_map_offset_t         map_addr;
-       kern_return_t           kr;
+       vm_map_offset_t         map_addr;
+       kern_return_t           kr;
 
-       if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
+       if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        /* filter out any kernel-only flags */
-       if (flags & ~VM_FLAGS_USER_REMAP)
+       if (flags & ~VM_FLAGS_USER_REMAP) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        map_addr = (vm_map_offset_t)*address;
 
+       *cur_protection = VM_PROT_NONE;
+       *max_protection = VM_PROT_NONE;
+
        kr = vm_map_remap(target_map,
-                         &map_addr,
-                         size,
-                         mask,
-                         flags,
-                         src_map,
-                         memory_address,
-                         copy,
-                         cur_protection,
-                         max_protection,
-                         inheritance);
+           &map_addr,
+           size,
+           mask,
+           flags,
+           VM_MAP_KERNEL_FLAGS_NONE,
+           tag,
+           src_map,
+           memory_address,
+           copy,
+           cur_protection,    /* IN/OUT */
+           max_protection,    /* IN/OUT */
+           inheritance);
        *address = map_addr;
        return kr;
 }
 
+/*
+ * vm_remap_new -
+ * Behaves like vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
+ * and {cur,max}_protection are in/out.
+ */
+kern_return_t
+vm_remap_new_external(
+       vm_map_t                target_map,
+       vm_offset_t             *address,
+       vm_size_t               size,
+       vm_offset_t             mask,
+       int                     flags,
+       mach_port_t             src_tport,
+       vm_offset_t             memory_address,
+       boolean_t               copy,
+       vm_prot_t               *cur_protection,       /* IN/OUT */
+       vm_prot_t               *max_protection,       /* IN/OUT */
+       vm_inherit_t            inheritance)
+{
+       vm_tag_t tag;
+       vm_map_offset_t         map_addr;
+       kern_return_t           kr;
+       vm_map_t src_map;
+
+       flags |= VM_FLAGS_RETURN_DATA_ADDR;
+       VM_GET_FLAGS_ALIAS(flags, tag);
+
+       /* filter out any kernel-only flags */
+       if (flags & ~VM_FLAGS_USER_REMAP) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if (target_map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if ((*cur_protection & ~VM_PROT_ALL) ||
+           (*max_protection & ~VM_PROT_ALL) ||
+           (*cur_protection & *max_protection) != *cur_protection) {
+               return KERN_INVALID_ARGUMENT;
+       }
+       if ((*max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
+           (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
+               /*
+                * XXX FBDP TODO
+                * enforce target's "wx" policies
+                */
+               return KERN_PROTECTION_FAILURE;
+       }
+
+       if (copy || *max_protection == VM_PROT_READ || *max_protection == VM_PROT_NONE) {
+               src_map = convert_port_to_map_read(src_tport);
+       } else {
+               src_map = convert_port_to_map(src_tport);
+       }
+
+       if (src_map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       map_addr = (vm_map_offset_t)*address;
+
+       kr = vm_map_remap(target_map,
+           &map_addr,
+           size,
+           mask,
+           flags,
+           VM_MAP_KERNEL_FLAGS_NONE,
+           tag,
+           src_map,
+           memory_address,
+           copy,
+           cur_protection,   /* IN/OUT */
+           max_protection,   /* IN/OUT */
+           inheritance);
+
+       *address = CAST_DOWN(vm_offset_t, map_addr);
+       vm_map_deallocate(src_map);
+
+       if (kr == KERN_SUCCESS) {
+               ipc_port_release_send(src_tport); /* consume on success */
+       }
+       return kr;
+}
+
 /*
  * vm_remap -
  * Remap a range of memory from one task into another,
@@ -1063,42 +1443,71 @@ mach_vm_remap(
  * kernel context).
  */
 kern_return_t
-vm_remap(
-       vm_map_t                target_map,
-       vm_offset_t             *address,
-       vm_size_t               size,
-       vm_offset_t             mask,
-       int                     flags,
-       vm_map_t                src_map,
-       vm_offset_t             memory_address,
-       boolean_t               copy,
-       vm_prot_t               *cur_protection,
-       vm_prot_t               *max_protection,
-       vm_inherit_t            inheritance)
+vm_remap_external(
+       vm_map_t                target_map,
+       vm_offset_t             *address,
+       vm_size_t               size,
+       vm_offset_t             mask,
+       int                     flags,
+       vm_map_t                src_map,
+       vm_offset_t             memory_address,
+       boolean_t               copy,
+       vm_prot_t               *cur_protection,    /* OUT */
+       vm_prot_t               *max_protection,    /* OUT */
+       vm_inherit_t            inheritance)
+{
+       vm_tag_t tag;
+       VM_GET_FLAGS_ALIAS(flags, tag);
+
+       return vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map,
+                  memory_address, copy, cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+vm_remap_kernel(
+       vm_map_t                target_map,
+       vm_offset_t             *address,
+       vm_size_t               size,
+       vm_offset_t             mask,
+       int                     flags,
+       vm_tag_t                tag,
+       vm_map_t                src_map,
+       vm_offset_t             memory_address,
+       boolean_t               copy,
+       vm_prot_t               *cur_protection,    /* OUT */
+       vm_prot_t               *max_protection,    /* OUT */
+       vm_inherit_t            inheritance)
 {
-       vm_map_offset_t         map_addr;
-       kern_return_t           kr;
+       vm_map_offset_t         map_addr;
+       kern_return_t           kr;
 
-       if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
+       if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        /* filter out any kernel-only flags */
-       if (flags & ~VM_FLAGS_USER_REMAP)
+       if (flags & ~VM_FLAGS_USER_REMAP) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        map_addr = (vm_map_offset_t)*address;
 
+       *cur_protection = VM_PROT_NONE;
+       *max_protection = VM_PROT_NONE;
+
        kr = vm_map_remap(target_map,
-                         &map_addr,
-                         size,
-                         mask,
-                         flags,
-                         src_map,
-                         memory_address,
-                         copy,
-                         cur_protection,
-                         max_protection,
-                         inheritance);
+           &map_addr,
+           size,
+           mask,
+           flags,
+           VM_MAP_KERNEL_FLAGS_NONE,
+           tag,
+           src_map,
+           memory_address,
+           copy,
+           cur_protection,   /* IN/OUT */
+           max_protection,   /* IN/OUT */
+           inheritance);
        *address = CAST_DOWN(vm_offset_t, map_addr);
        return kr;
 }
@@ -1117,41 +1526,54 @@ vm_remap(
  *     [ To unwire the pages, specify VM_PROT_NONE. ]
  */
 kern_return_t
-mach_vm_wire(
-       host_priv_t             host_priv,
-       vm_map_t                map,
-       mach_vm_offset_t        start,
-       mach_vm_size_t  size,
-       vm_prot_t               access)
+mach_vm_wire_external(
+       host_priv_t             host_priv,
+       vm_map_t                map,
+       mach_vm_offset_t        start,
+       mach_vm_size_t  size,
+       vm_prot_t               access)
 {
-       kern_return_t           rc;
+       return mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK);
+}
 
-       if (host_priv == HOST_PRIV_NULL)
-               return KERN_INVALID_HOST;
+kern_return_t
+mach_vm_wire_kernel(
+       host_priv_t             host_priv,
+       vm_map_t                map,
+       mach_vm_offset_t        start,
+       mach_vm_size_t  size,
+       vm_prot_t               access,
+       vm_tag_t                tag)
+{
+       kern_return_t           rc;
 
-       assert(host_priv == &realhost);
+       if (host_priv == HOST_PRIV_NULL) {
+               return KERN_INVALID_HOST;
+       }
 
-       if (map == VM_MAP_NULL)
+       if (map == VM_MAP_NULL) {
                return KERN_INVALID_TASK;
+       }
 
-       if (access & ~VM_PROT_ALL || (start + size < start))
+       if (access & ~VM_PROT_ALL || (start + size < start)) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        if (access != VM_PROT_NONE) {
-               rc = vm_map_wire(map,
-                                vm_map_trunc_page(start,
-                                                  VM_MAP_PAGE_MASK(map)),
-                                vm_map_round_page(start+size,
-                                                  VM_MAP_PAGE_MASK(map)),
-                                access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK),
-                                TRUE);
+               rc = vm_map_wire_kernel(map,
+                   vm_map_trunc_page(start,
+                   VM_MAP_PAGE_MASK(map)),
+                   vm_map_round_page(start + size,
+                   VM_MAP_PAGE_MASK(map)),
+                   access, tag,
+                   TRUE);
        } else {
                rc = vm_map_unwire(map,
-                                  vm_map_trunc_page(start,
-                                                    VM_MAP_PAGE_MASK(map)),
-                                  vm_map_round_page(start+size,
-                                                    VM_MAP_PAGE_MASK(map)),
-                                  TRUE);
+                   vm_map_trunc_page(start,
+                   VM_MAP_PAGE_MASK(map)),
+                   vm_map_round_page(start + size,
+                   VM_MAP_PAGE_MASK(map)),
+                   TRUE);
        }
        return rc;
 }
@@ -1166,42 +1588,43 @@ mach_vm_wire(
  */
 kern_return_t
 vm_wire(
-       host_priv_t             host_priv,
-       vm_map_t                map,
-       vm_offset_t             start,
-       vm_size_t               size,
-       vm_prot_t               access)
+       host_priv_t             host_priv,
+       vm_map_t                map,
+       vm_offset_t             start,
+       vm_size_t               size,
+       vm_prot_t               access)
 {
-       kern_return_t           rc;
+       kern_return_t           rc;
 
-       if (host_priv == HOST_PRIV_NULL)
+       if (host_priv == HOST_PRIV_NULL) {
                return KERN_INVALID_HOST;
+       }
 
-       assert(host_priv == &realhost);
-
-       if (map == VM_MAP_NULL)
+       if (map == VM_MAP_NULL) {
                return KERN_INVALID_TASK;
+       }
 
-       if ((access & ~VM_PROT_ALL) || (start + size < start))
+       if ((access & ~VM_PROT_ALL) || (start + size < start)) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        if (size == 0) {
                rc = KERN_SUCCESS;
        } else if (access != VM_PROT_NONE) {
-               rc = vm_map_wire(map,
-                                vm_map_trunc_page(start,
-                                                  VM_MAP_PAGE_MASK(map)),
-                                vm_map_round_page(start+size,
-                                                  VM_MAP_PAGE_MASK(map)),
-                                access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
-                                TRUE);
+               rc = vm_map_wire_kernel(map,
+                   vm_map_trunc_page(start,
+                   VM_MAP_PAGE_MASK(map)),
+                   vm_map_round_page(start + size,
+                   VM_MAP_PAGE_MASK(map)),
+                   access, VM_KERN_MEMORY_OSFMK,
+                   TRUE);
        } else {
                rc = vm_map_unwire(map,
-                                  vm_map_trunc_page(start,
-                                                    VM_MAP_PAGE_MASK(map)),
-                                  vm_map_round_page(start+size,
-                                                    VM_MAP_PAGE_MASK(map)),
-                                  TRUE);
+                   vm_map_trunc_page(start,
+                   VM_MAP_PAGE_MASK(map)),
+                   vm_map_round_page(start + size,
+                   VM_MAP_PAGE_MASK(map)),
+                   TRUE);
        }
        return rc;
 }
@@ -1239,19 +1662,19 @@ vm_wire(
 
 kern_return_t
 mach_vm_msync(
-       vm_map_t                map,
-       mach_vm_address_t       address,
-       mach_vm_size_t  size,
-       vm_sync_t               sync_flags)
+       vm_map_t                map,
+       mach_vm_address_t       address,
+       mach_vm_size_t  size,
+       vm_sync_t               sync_flags)
 {
-
-       if (map == VM_MAP_NULL)
-               return(KERN_INVALID_TASK);
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_TASK;
+       }
 
        return vm_map_msync(map, (vm_map_address_t)address,
-                       (vm_map_size_t)size, sync_flags);
+                  (vm_map_size_t)size, sync_flags);
 }
-      
+
 /*
  *     vm_msync
  *
@@ -1288,17 +1711,17 @@ mach_vm_msync(
 
 kern_return_t
 vm_msync(
-       vm_map_t        map,
-       vm_address_t    address,
-       vm_size_t       size,
-       vm_sync_t       sync_flags)
+       vm_map_t        map,
+       vm_address_t    address,
+       vm_size_t       size,
+       vm_sync_t       sync_flags)
 {
-
-       if (map == VM_MAP_NULL)
-               return(KERN_INVALID_TASK);
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_TASK;
+       }
 
        return vm_map_msync(map, (vm_map_address_t)address,
-                       (vm_map_size_t)size, sync_flags);
+                  (vm_map_size_t)size, sync_flags);
 }
 
 
@@ -1306,11 +1729,11 @@ int
 vm_toggle_entry_reuse(int toggle, int *old_value)
 {
        vm_map_t map = current_map();
-       
+
        assert(!map->is_nested_map);
-       if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
+       if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
                *old_value = map->disable_vmentry_reuse;
-       } else if(toggle == VM_TOGGLE_SET){
+       } else if (toggle == VM_TOGGLE_SET) {
                vm_map_entry_t map_to_entry;
 
                vm_map_lock(map);
@@ -1323,18 +1746,19 @@ vm_toggle_entry_reuse(int toggle, int *old_value)
                        map->highest_entry_end = map->first_free->vme_end;
                }
                vm_map_unlock(map);
-       } else if (toggle == VM_TOGGLE_CLEAR){
+       } else if (toggle == VM_TOGGLE_CLEAR) {
                vm_map_lock(map);
                map->disable_vmentry_reuse = FALSE;
                vm_map_unlock(map);
-       } else
+       } else {
                return KERN_INVALID_ARGUMENT;
+       }
 
        return KERN_SUCCESS;
 }
 
 /*
- *     mach_vm_behavior_set 
+ *     mach_vm_behavior_set
  *
  *     Sets the paging behavior attribute for the  specified range
  *     in the specified map.
@@ -1342,20 +1766,22 @@ vm_toggle_entry_reuse(int toggle, int *old_value)
  *     This routine will fail with KERN_INVALID_ADDRESS if any address
  *     in [start,start+size) is not a valid allocated memory region.
  */
-kern_return_t 
+kern_return_t
 mach_vm_behavior_set(
-       vm_map_t                map,
-       mach_vm_offset_t        start,
-       mach_vm_size_t          size,
-       vm_behavior_t           new_behavior)
+       vm_map_t                map,
+       mach_vm_offset_t        start,
+       mach_vm_size_t          size,
+       vm_behavior_t           new_behavior)
 {
-       vm_map_offset_t align_mask;
+       vm_map_offset_t align_mask;
 
-       if ((map == VM_MAP_NULL) || (start + size < start))
-               return(KERN_INVALID_ARGUMENT);
+       if ((map == VM_MAP_NULL) || (start + size < start)) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (size == 0)
+       if (size == 0) {
                return KERN_SUCCESS;
+       }
 
        switch (new_behavior) {
        case VM_BEHAVIOR_REUSABLE:
@@ -1374,13 +1800,13 @@ mach_vm_behavior_set(
        }
 
        return vm_map_behavior_set(map,
-                                  vm_map_trunc_page(start, align_mask),
-                                  vm_map_round_page(start+size, align_mask),
-                                  new_behavior);
+                  vm_map_trunc_page(start, align_mask),
+                  vm_map_round_page(start + size, align_mask),
+                  new_behavior);
 }
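A small user-space sketch of the call above, assuming the MIG-generated mach_vm_behavior_set() wrapper; the helper and its buffer are hypothetical:

#include <stddef.h>
#include <stdint.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

/* Illustrative helper: hint that a scratch buffer's pages may be reclaimed
 * without being written to the compressor or swap. */
static kern_return_t
mark_reusable(void *buf, size_t len)
{
	return mach_vm_behavior_set(mach_task_self(),
	    (mach_vm_address_t)(uintptr_t)buf,
	    (mach_vm_size_t)len,
	    VM_BEHAVIOR_REUSABLE);
}
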
 
 /*
- *     vm_behavior_set 
+ *     vm_behavior_set
  *
  *     Sets the paging behavior attribute for the  specified range
  *     in the specified map.
@@ -1392,20 +1818,21 @@ mach_vm_behavior_set(
  *     use of vm_offset_t (if the map provided is larger than the
  *     kernel's).
  */
-kern_return_t 
+kern_return_t
 vm_behavior_set(
-       vm_map_t                map,
-       vm_offset_t             start,
-       vm_size_t               size,
-       vm_behavior_t           new_behavior)
+       vm_map_t                map,
+       vm_offset_t             start,
+       vm_size_t               size,
+       vm_behavior_t           new_behavior)
 {
-       if (start + size < start)
+       if (start + size < start) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        return mach_vm_behavior_set(map,
-                                   (mach_vm_offset_t) start,
-                                   (mach_vm_size_t) size,
-                                   new_behavior);
+                  (mach_vm_offset_t) start,
+                  (mach_vm_size_t) size,
+                  new_behavior);
 }
 
 /*
@@ -1424,32 +1851,34 @@ vm_behavior_set(
 
 kern_return_t
 mach_vm_region(
-       vm_map_t                 map,
-       mach_vm_offset_t        *address,               /* IN/OUT */
-       mach_vm_size_t  *size,                  /* OUT */
-       vm_region_flavor_t       flavor,                /* IN */
-       vm_region_info_t         info,                  /* OUT */
-       mach_msg_type_number_t  *count,                 /* IN/OUT */
-       mach_port_t             *object_name)           /* OUT */
+       vm_map_t                 map,
+       mach_vm_offset_t        *address,               /* IN/OUT */
+       mach_vm_size_t          *size,                  /* OUT */
+       vm_region_flavor_t       flavor,                /* IN */
+       vm_region_info_t         info,                  /* OUT */
+       mach_msg_type_number_t  *count,                 /* IN/OUT */
+       mach_port_t             *object_name)           /* OUT */
 {
-       vm_map_offset_t         map_addr;
-       vm_map_size_t           map_size;
-       kern_return_t           kr;
+       vm_map_offset_t         map_addr;
+       vm_map_size_t           map_size;
+       kern_return_t           kr;
 
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        map_addr = (vm_map_offset_t)*address;
        map_size = (vm_map_size_t)*size;
 
        /* legacy conversion */
-       if (VM_REGION_BASIC_INFO == flavor)
+       if (VM_REGION_BASIC_INFO == flavor) {
                flavor = VM_REGION_BASIC_INFO_64;
+       }
 
        kr = vm_map_region(map,
-                          &map_addr, &map_size,
-                          flavor, info, count,
-                          object_name);
+           &map_addr, &map_size,
+           flavor, info, count,
+           object_name);
 
        *address = map_addr;
        *size = map_size;
@@ -1472,71 +1901,76 @@ mach_vm_region(
 
 kern_return_t
 vm_region_64(
-       vm_map_t                 map,
-       vm_offset_t             *address,               /* IN/OUT */
-       vm_size_t               *size,                  /* OUT */
-       vm_region_flavor_t       flavor,                /* IN */
-       vm_region_info_t         info,                  /* OUT */
-       mach_msg_type_number_t  *count,                 /* IN/OUT */
-       mach_port_t             *object_name)           /* OUT */
+       vm_map_t                 map,
+       vm_offset_t             *address,               /* IN/OUT */
+       vm_size_t               *size,                  /* OUT */
+       vm_region_flavor_t       flavor,                /* IN */
+       vm_region_info_t         info,                  /* OUT */
+       mach_msg_type_number_t  *count,                 /* IN/OUT */
+       mach_port_t             *object_name)           /* OUT */
 {
-       vm_map_offset_t         map_addr;
-       vm_map_size_t           map_size;
-       kern_return_t           kr;
+       vm_map_offset_t         map_addr;
+       vm_map_size_t           map_size;
+       kern_return_t           kr;
 
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        map_addr = (vm_map_offset_t)*address;
        map_size = (vm_map_size_t)*size;
 
        /* legacy conversion */
-       if (VM_REGION_BASIC_INFO == flavor)
+       if (VM_REGION_BASIC_INFO == flavor) {
                flavor = VM_REGION_BASIC_INFO_64;
+       }
 
        kr = vm_map_region(map,
-                          &map_addr, &map_size,
-                          flavor, info, count,
-                          object_name);
+           &map_addr, &map_size,
+           flavor, info, count,
+           object_name);
 
        *address = CAST_DOWN(vm_offset_t, map_addr);
        *size = CAST_DOWN(vm_size_t, map_size);
 
-       if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
+       if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
                return KERN_INVALID_ADDRESS;
+       }
        return kr;
 }
 
 kern_return_t
 vm_region(
-       vm_map_t                        map,
-       vm_address_t                    *address,       /* IN/OUT */
-       vm_size_t                       *size,          /* OUT */
-       vm_region_flavor_t              flavor, /* IN */
-       vm_region_info_t                info,           /* OUT */
-       mach_msg_type_number_t  *count, /* IN/OUT */
-       mach_port_t                     *object_name)   /* OUT */
+       vm_map_t                        map,
+       vm_address_t                    *address,       /* IN/OUT */
+       vm_size_t                       *size,          /* OUT */
+       vm_region_flavor_t              flavor, /* IN */
+       vm_region_info_t                info,           /* OUT */
+       mach_msg_type_number_t  *count, /* IN/OUT */
+       mach_port_t                     *object_name)   /* OUT */
 {
-       vm_map_address_t        map_addr;
-       vm_map_size_t           map_size;
-       kern_return_t           kr;
+       vm_map_address_t        map_addr;
+       vm_map_size_t           map_size;
+       kern_return_t           kr;
 
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        map_addr = (vm_map_address_t)*address;
        map_size = (vm_map_size_t)*size;
 
        kr = vm_map_region(map,
-                          &map_addr, &map_size,
-                          flavor, info, count,
-                          object_name);
+           &map_addr, &map_size,
+           flavor, info, count,
+           object_name);
 
        *address = CAST_DOWN(vm_address_t, map_addr);
        *size = CAST_DOWN(vm_size_t, map_size);
 
-       if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
+       if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
                return KERN_INVALID_ADDRESS;
+       }
        return kr;
 }
 
@@ -1547,30 +1981,31 @@ vm_region(
  */
 kern_return_t
 mach_vm_region_recurse(
-       vm_map_t                        map,
-       mach_vm_address_t               *address,
-       mach_vm_size_t          *size,
-       uint32_t                        *depth,
-       vm_region_recurse_info_t        info,
-       mach_msg_type_number_t  *infoCnt)
+       vm_map_t                        map,
+       mach_vm_address_t               *address,
+       mach_vm_size_t          *size,
+       uint32_t                        *depth,
+       vm_region_recurse_info_t        info,
+       mach_msg_type_number_t  *infoCnt)
 {
-       vm_map_address_t        map_addr;
-       vm_map_size_t           map_size;
-       kern_return_t           kr;
+       vm_map_address_t        map_addr;
+       vm_map_size_t           map_size;
+       kern_return_t           kr;
 
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        map_addr = (vm_map_address_t)*address;
        map_size = (vm_map_size_t)*size;
 
        kr = vm_map_region_recurse_64(
-                       map,
-                       &map_addr,
-                       &map_size,
-                       depth,
-                       (vm_region_submap_info_64_t)info,
-                       infoCnt);
+               map,
+               &map_addr,
+               &map_size,
+               depth,
+               (vm_region_submap_info_64_t)info,
+               infoCnt);
 
        *address = map_addr;
        *size = map_size;
@@ -1584,125 +2019,141 @@ mach_vm_region_recurse(
  */
 kern_return_t
 vm_region_recurse_64(
-       vm_map_t                        map,
-       vm_address_t                    *address,
-       vm_size_t                       *size,
-       uint32_t                        *depth,
-       vm_region_recurse_info_64_t     info,
-       mach_msg_type_number_t  *infoCnt)
+       vm_map_t                        map,
+       vm_address_t                    *address,
+       vm_size_t                       *size,
+       uint32_t                        *depth,
+       vm_region_recurse_info_64_t     info,
+       mach_msg_type_number_t  *infoCnt)
 {
-       vm_map_address_t        map_addr;
-       vm_map_size_t           map_size;
-       kern_return_t           kr;
+       vm_map_address_t        map_addr;
+       vm_map_size_t           map_size;
+       kern_return_t           kr;
 
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        map_addr = (vm_map_address_t)*address;
        map_size = (vm_map_size_t)*size;
 
        kr = vm_map_region_recurse_64(
-                       map,
-                       &map_addr,
-                       &map_size,
-                       depth,
-                       (vm_region_submap_info_64_t)info,
-                       infoCnt);
+               map,
+               &map_addr,
+               &map_size,
+               depth,
+               (vm_region_submap_info_64_t)info,
+               infoCnt);
 
        *address = CAST_DOWN(vm_address_t, map_addr);
        *size = CAST_DOWN(vm_size_t, map_size);
 
-       if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
+       if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
                return KERN_INVALID_ADDRESS;
+       }
        return kr;
 }
 
 kern_return_t
 vm_region_recurse(
-       vm_map_t                        map,
-       vm_offset_t             *address,       /* IN/OUT */
-       vm_size_t                       *size,          /* OUT */
-       natural_t                       *depth, /* IN/OUT */
-       vm_region_recurse_info_t        info32, /* IN/OUT */
-       mach_msg_type_number_t  *infoCnt)       /* IN/OUT */
+       vm_map_t                        map,
+       vm_offset_t             *address,       /* IN/OUT */
+       vm_size_t                       *size,          /* OUT */
+       natural_t                       *depth, /* IN/OUT */
+       vm_region_recurse_info_t        info32, /* IN/OUT */
+       mach_msg_type_number_t  *infoCnt)       /* IN/OUT */
 {
        vm_region_submap_info_data_64_t info64;
        vm_region_submap_info_t info;
-       vm_map_address_t        map_addr;
-       vm_map_size_t           map_size;
-       kern_return_t           kr;
+       vm_map_address_t        map_addr;
+       vm_map_size_t           map_size;
+       kern_return_t           kr;
 
-       if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
+       if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
                return KERN_INVALID_ARGUMENT;
+       }
+
 
-       
        map_addr = (vm_map_address_t)*address;
        map_size = (vm_map_size_t)*size;
        info = (vm_region_submap_info_t)info32;
        *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;
 
-       kr = vm_map_region_recurse_64(map, &map_addr,&map_size,
-                                     depth, &info64, infoCnt);
+       kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
+           depth, &info64, infoCnt);
 
        info->protection = info64.protection;
        info->max_protection = info64.max_protection;
        info->inheritance = info64.inheritance;
        info->offset = (uint32_t)info64.offset; /* trouble-maker */
-        info->user_tag = info64.user_tag;
-        info->pages_resident = info64.pages_resident;
-        info->pages_shared_now_private = info64.pages_shared_now_private;
-        info->pages_swapped_out = info64.pages_swapped_out;
-        info->pages_dirtied = info64.pages_dirtied;
-        info->ref_count = info64.ref_count;
-        info->shadow_depth = info64.shadow_depth;
-        info->external_pager = info64.external_pager;
-        info->share_mode = info64.share_mode;
+       info->user_tag = info64.user_tag;
+       info->pages_resident = info64.pages_resident;
+       info->pages_shared_now_private = info64.pages_shared_now_private;
+       info->pages_swapped_out = info64.pages_swapped_out;
+       info->pages_dirtied = info64.pages_dirtied;
+       info->ref_count = info64.ref_count;
+       info->shadow_depth = info64.shadow_depth;
+       info->external_pager = info64.external_pager;
+       info->share_mode = info64.share_mode;
        info->is_submap = info64.is_submap;
        info->behavior = info64.behavior;
        info->object_id = info64.object_id;
-       info->user_wired_count = info64.user_wired_count; 
+       info->user_wired_count = info64.user_wired_count;
 
        *address = CAST_DOWN(vm_address_t, map_addr);
        *size = CAST_DOWN(vm_size_t, map_size);
        *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;
 
-       if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
+       if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
                return KERN_INVALID_ADDRESS;
+       }
        return kr;
 }
 
 kern_return_t
 mach_vm_purgable_control(
-       vm_map_t                map,
-       mach_vm_offset_t        address,
-       vm_purgable_t           control,
-       int                     *state)
+       vm_map_t                map,
+       mach_vm_offset_t        address,
+       vm_purgable_t           control,
+       int                     *state)
 {
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
+
+       if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+               /* not allowed from user-space */
+               return KERN_INVALID_ARGUMENT;
+       }
 
        return vm_map_purgable_control(map,
-                                      vm_map_trunc_page(address, PAGE_MASK),
-                                      control,
-                                      state);
+                  vm_map_trunc_page(address, VM_MAP_PAGE_MASK(map)),
+                  control,
+                  state);
 }
 
 kern_return_t
 vm_purgable_control(
-       vm_map_t                map,
-       vm_offset_t             address,
-       vm_purgable_t           control,
-       int                     *state)
+       vm_map_t                map,
+       vm_offset_t             address,
+       vm_purgable_t           control,
+       int                     *state)
 {
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
+
+       if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+               /* not allowed from user-space */
+               return KERN_INVALID_ARGUMENT;
+       }
 
        return vm_map_purgable_control(map,
-                                      vm_map_trunc_page(address, PAGE_MASK),
-                                      control,
-                                      state);
+                  vm_map_trunc_page(address, VM_MAP_PAGE_MASK(map)),
+                  control,
+                  state);
 }
-                                       
+
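For illustration, the corresponding user-space call for the path above (not the new *_FROM_KERNEL control), assuming a region previously allocated with VM_FLAGS_PURGABLE; the helper is hypothetical:

#include <mach/mach.h>
#include <mach/vm_purgable.h>

/* Illustrative helper: mark a purgeable allocation volatile so the kernel
 * may reclaim it under memory pressure. */
static kern_return_t
make_volatile(vm_address_t addr)
{
	int state = VM_PURGABLE_VOLATILE;       /* IN: new state, OUT: old state */

	return vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);
}
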
 
 /*
  *     Ordinarily, the right to allocate CPM is restricted
@@ -1710,7 +2161,7 @@ vm_purgable_control(
  *     to the host priv port).  Set this variable to zero if
  *     you want to let any application allocate CPM.
  */
-unsigned int   vm_allocate_cpm_privileged = 0;
+unsigned int    vm_allocate_cpm_privileged = 0;
 
 /*
  *     Allocate memory in the specified map, with the caveat that
@@ -1724,29 +2175,31 @@ unsigned int    vm_allocate_cpm_privileged = 0;
  */
 kern_return_t
 vm_allocate_cpm(
-       host_priv_t             host_priv,
-       vm_map_t                map,
-       vm_address_t            *addr,
-       vm_size_t               size,
-       int                     flags)
+       host_priv_t             host_priv,
+       vm_map_t                map,
+       vm_address_t            *addr,
+       vm_size_t               size,
+       int                     flags)
 {
-       vm_map_address_t        map_addr;
-       vm_map_size_t           map_size;
-       kern_return_t           kr;
+       vm_map_address_t        map_addr;
+       vm_map_size_t           map_size;
+       kern_return_t           kr;
 
-       if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
+       if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) {
                return KERN_INVALID_HOST;
+       }
 
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        map_addr = (vm_map_address_t)*addr;
        map_size = (vm_map_size_t)size;
 
        kr = vm_map_enter_cpm(map,
-                             &map_addr,
-                             map_size,
-                             flags);
+           &map_addr,
+           map_size,
+           flags);
 
        *addr = CAST_DOWN(vm_address_t, map_addr);
        return kr;
@@ -1755,13 +2208,14 @@ vm_allocate_cpm(
 
 kern_return_t
 mach_vm_page_query(
-       vm_map_t                map,
-       mach_vm_offset_t        offset,
-       int                     *disposition,
-       int                     *ref_count)
+       vm_map_t                map,
+       mach_vm_offset_t        offset,
+       int                     *disposition,
+       int                     *ref_count)
 {
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        return vm_map_page_query_internal(
                map,
@@ -1771,13 +2225,14 @@ mach_vm_page_query(
 
 kern_return_t
 vm_map_page_query(
-       vm_map_t                map,
-       vm_offset_t             offset,
-       int                     *disposition,
-       int                     *ref_count)
+       vm_map_t                map,
+       vm_offset_t             offset,
+       int                     *disposition,
+       int                     *ref_count)
 {
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        return vm_map_page_query_internal(
                map,
@@ -1785,15 +2240,146 @@ vm_map_page_query(
                disposition, ref_count);
 }
 
+kern_return_t
+mach_vm_page_range_query(
+       vm_map_t                map,
+       mach_vm_offset_t        address,
+       mach_vm_size_t          size,
+       mach_vm_address_t       dispositions_addr,
+       mach_vm_size_t          *dispositions_count)
+{
+       kern_return_t           kr = KERN_SUCCESS;
+       int                     num_pages = 0, i = 0;
+       mach_vm_size_t          curr_sz = 0, copy_sz = 0;
+       mach_vm_size_t          disp_buf_req_size = 0, disp_buf_total_size = 0;
+       mach_msg_type_number_t  count = 0;
+
+       void                    *info = NULL;
+       void                    *local_disp = NULL;;
+       vm_map_size_t           info_size = 0, local_disp_size = 0;
+       mach_vm_offset_t        start = 0, end = 0;
+       int                     effective_page_shift, effective_page_size, effective_page_mask;
+
+       if (map == VM_MAP_NULL || dispositions_count == NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       effective_page_shift = vm_self_region_page_shift_safely(map);
+       if (effective_page_shift == -1) {
+               return KERN_INVALID_ARGUMENT;
+       }
+       effective_page_size = (1 << effective_page_shift);
+       effective_page_mask = effective_page_size - 1;
+
+       if (os_mul_overflow(*dispositions_count, sizeof(int), &disp_buf_req_size)) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       start = vm_map_trunc_page(address, effective_page_mask);
+       end = vm_map_round_page(address + size, effective_page_mask);
+
+       if (end < start) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if ((end - start) < size) {
+               /*
+                * Aligned size is less than unaligned size.
+                */
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if (disp_buf_req_size == 0 || (end == start)) {
+               return KERN_SUCCESS;
+       }
+
+       /*
+        * For large requests, we will go through them
+        * MAX_PAGE_RANGE_QUERY chunk at a time.
+        */
+
+       curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY);
+       num_pages = (int) (curr_sz >> effective_page_shift);
+
+       info_size = num_pages * sizeof(vm_page_info_basic_data_t);
+       info = kheap_alloc(KHEAP_TEMP, info_size, Z_WAITOK);
+
+       local_disp_size = num_pages * sizeof(int);
+       local_disp = kheap_alloc(KHEAP_TEMP, local_disp_size, Z_WAITOK);
+
+       if (info == NULL || local_disp == NULL) {
+               kr = KERN_RESOURCE_SHORTAGE;
+               goto out;
+       }
+
+       while (size) {
+               count = VM_PAGE_INFO_BASIC_COUNT;
+               kr = vm_map_page_range_info_internal(
+                       map,
+                       start,
+                       vm_map_round_page(start + curr_sz, effective_page_mask),
+                       effective_page_shift,
+                       VM_PAGE_INFO_BASIC,
+                       (vm_page_info_t) info,
+                       &count);
+
+               assert(kr == KERN_SUCCESS);
+
+               for (i = 0; i < num_pages; i++) {
+                       ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
+               }
+
+               copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */);
+               kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);
+
+               start += curr_sz;
+               disp_buf_req_size -= copy_sz;
+               disp_buf_total_size += copy_sz;
+
+               if (kr != 0) {
+                       break;
+               }
+
+               if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
+                       /*
+                        * We might have inspected the full range OR
+                        * more than it esp. if the user passed in
+                        * non-page aligned start/size and/or if we
+                        * descended into a submap. We are done here.
+                        */
+
+                       size = 0;
+               } else {
+                       dispositions_addr += copy_sz;
+
+                       size -= curr_sz;
+
+                       curr_sz = MIN(vm_map_round_page(size, effective_page_mask), MAX_PAGE_RANGE_QUERY);
+                       num_pages = (int)(curr_sz >> effective_page_shift);
+               }
+       }
+
+       *dispositions_count = disp_buf_total_size / sizeof(int);
+
+out:
+       if (local_disp) {
+               kheap_free(KHEAP_TEMP, local_disp, local_disp_size);
+       }
+       if (info) {
+               kheap_free(KHEAP_TEMP, info, info_size);
+       }
+       return kr;
+}
+
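As a usage sketch of the chunked query above, assuming the MIG-generated mach_vm_page_range_query() wrapper; the helper and its buffer sizing are illustrative:

#include <stdint.h>
#include <stdlib.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

/* Illustrative helper: fetch one disposition word per page of [addr, addr + len). */
static kern_return_t
query_dispositions(mach_vm_address_t addr, mach_vm_size_t len)
{
	mach_vm_size_t count = len / vm_page_size;      /* IN: entries available in the buffer */
	int *disp = calloc((size_t)count, sizeof(*disp));
	kern_return_t kr;

	if (disp == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	kr = mach_vm_page_range_query(mach_task_self(), addr, len,
	    (mach_vm_address_t)(uintptr_t)disp, &count);
	/* On success, count holds the number of dispositions actually copied out. */
	free(disp);
	return kr;
}
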
 kern_return_t
 mach_vm_page_info(
-       vm_map_t                map,
-       mach_vm_address_t       address,
-       vm_page_info_flavor_t   flavor,
-       vm_page_info_t          info,
-       mach_msg_type_number_t  *count)
+       vm_map_t                map,
+       mach_vm_address_t       address,
+       vm_page_info_flavor_t   flavor,
+       vm_page_info_t          info,
+       mach_msg_type_number_t  *count)
 {
-       kern_return_t   kr;
+       kern_return_t   kr;
 
        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
@@ -1806,15 +2392,16 @@ mach_vm_page_info(
 /* map a (whole) upl into an address space */
 kern_return_t
 vm_upl_map(
-       vm_map_t                map, 
-       upl_t                   upl, 
-       vm_address_t            *dst_addr)
+       vm_map_t                map,
+       upl_t                   upl,
+       vm_address_t            *dst_addr)
 {
-       vm_map_offset_t         map_addr;
-       kern_return_t           kr;
+       vm_map_offset_t         map_addr;
+       kern_return_t           kr;
 
 
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        kr = vm_map_enter_upl(map, upl, &map_addr);
        *dst_addr = CAST_DOWN(vm_address_t, map_addr);
@@ -1823,51 +2410,55 @@ vm_upl_map(
 
 kern_return_t
 vm_upl_unmap(
-       vm_map_t                map,
-       upl_t                   upl)
+       vm_map_t                map,
+       upl_t                   upl)
 {
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
 
-       return (vm_map_remove_upl(map, upl));
+       return vm_map_remove_upl(map, upl);
 }
 
 /* Retrieve a upl for an object underlying an address range in a map */
 
 kern_return_t
 vm_map_get_upl(
-       vm_map_t                map,
-       vm_map_offset_t         map_offset,
-       upl_size_t              *upl_size,
-       upl_t                   *upl,
-       upl_page_info_array_t   page_list,
-       unsigned int            *count,
-       upl_control_flags_t     *flags,
-       int                     force_data_sync)
+       vm_map_t                map,
+       vm_map_offset_t         map_offset,
+       upl_size_t              *upl_size,
+       upl_t                   *upl,
+       upl_page_info_array_t   page_list,
+       unsigned int            *count,
+       upl_control_flags_t     *flags,
+       vm_tag_t                tag,
+       int                     force_data_sync)
 {
        upl_control_flags_t map_flags;
-       kern_return_t       kr;
+       kern_return_t       kr;
 
 
-       if (VM_MAP_NULL == map)
+       if (VM_MAP_NULL == map) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        map_flags = *flags & ~UPL_NOZEROFILL;
-       if (force_data_sync)
+       if (force_data_sync) {
                map_flags |= UPL_FORCE_DATA_SYNC;
+       }
 
        kr = vm_map_create_upl(map,
-                              map_offset,
-                              upl_size,
-                              upl,
-                              page_list,
-                              count,
-                              &map_flags);
+           map_offset,
+           upl_size,
+           upl,
+           page_list,
+           count,
+           &map_flags,
+           tag);
 
        *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
        return kr;
 }
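
vm_map_get_upl() now threads a vm_tag_t through to vm_map_create_upl() so that the pages grabbed for the UPL can be attributed to a kernel allocation tag. A hedged in-kernel sketch using only the signatures visible in this hunk; the UPL flag choice and the VM_KERN_MEMORY_FILE tag are illustrative, and none of this is a stable KPI.

/* Hedged in-kernel sketch, not a supported interface. */
static kern_return_t
upl_for_user_range(vm_map_t user_map, vm_map_offset_t offset, upl_size_t len)
{
	upl_t                   upl = NULL;
	upl_size_t              upl_size = len;
	upl_control_flags_t     flags = UPL_COPYOUT_FROM | UPL_SET_LITE; /* illustrative */
	unsigned int            page_list_count = 0;   /* no page list supplied */
	vm_address_t            kaddr;
	kern_return_t           kr;

	kr = vm_map_get_upl(user_map, offset, &upl_size, &upl,
	    NULL /* page_list */, &page_list_count, &flags,
	    VM_KERN_MEMORY_FILE /* tag */, 0 /* force_data_sync */);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Map the whole UPL into the kernel map, use it, then tear it down. */
	kr = vm_upl_map(kernel_map, upl, &kaddr);
	if (kr == KERN_SUCCESS) {
		/* ... access [kaddr, kaddr + upl_size) ... */
		(void)vm_upl_unmap(kernel_map, upl);
	}
	upl_deallocate(upl);
	return kr;
}
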
 
-
 /*
  * mach_make_memory_entry_64
  *
@@ -1876,81 +2467,89 @@ vm_map_get_upl(
  * somewhere else. Rather than doing it all at once (and
  * without needing access to the other whole map).
  */
-
 kern_return_t
 mach_make_memory_entry_64(
-       vm_map_t                target_map,
-       memory_object_size_t    *size,
-       memory_object_offset_t offset,
-       vm_prot_t               permission,
-       ipc_port_t              *object_handle,
-       ipc_port_t              parent_handle)
-{
-       vm_map_version_t        version;
-       vm_named_entry_t        parent_entry;
-       vm_named_entry_t        user_entry;
-       ipc_port_t              user_handle;
-       kern_return_t           kr;
-       vm_map_t                real_map;
-
-       /* needed for call to vm_map_lookup_locked */
-       boolean_t               wired;
-       boolean_t               iskernel;
-       vm_object_offset_t      obj_off;
-       vm_prot_t               prot;
-       struct vm_object_fault_info     fault_info;
-       vm_object_t             object;
-       vm_object_t             shadow_object;
-
-       /* needed for direct map entry manipulation */
-       vm_map_entry_t          map_entry;
-       vm_map_entry_t          next_entry;
-       vm_map_t                local_map;
-       vm_map_t                original_map = target_map;
-       vm_map_size_t           total_size, map_size;
-       vm_map_offset_t         map_start, map_end;
-       vm_map_offset_t         local_offset;
-       vm_object_size_t        mappable_size;
-
-       /* 
+       vm_map_t                target_map,
+       memory_object_size_t    *size,
+       memory_object_offset_t  offset,
+       vm_prot_t               permission,
+       ipc_port_t              *object_handle,
+       ipc_port_t              parent_handle)
+{
+       vm_named_entry_kernel_flags_t   vmne_kflags;
+
+       if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
+               /*
+                * Unknown flag: reject for forward compatibility.
+                */
+               return KERN_INVALID_VALUE;
+       }
+
+       vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
+       if (permission & MAP_MEM_LEDGER_TAGGED) {
+               vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
+       }
+       return mach_make_memory_entry_internal(target_map,
+                  size,
+                  offset,
+                  permission,
+                  vmne_kflags,
+                  object_handle,
+                  parent_handle);
+}
+
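
mach_make_memory_entry_64() is now a thin user-facing wrapper: it rejects any MAP_MEM_FLAGS_MASK bit outside MAP_MEM_FLAGS_USER and translates MAP_MEM_LEDGER_TAGGED into the default ledger tag before calling the internal routine. A hedged user-space sketch of the common MAP_MEM_NAMED_CREATE use, creating and mapping a purgeable anonymous buffer; flag spellings and header locations are assumptions to verify against <mach/memory_object_types.h> and <mach/mach_vm.h>.

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/memory_object_types.h>

/* Hedged sketch: create an anonymous purgeable named entry and map it. */
static kern_return_t
map_purgeable_buffer(mach_vm_size_t size, mach_vm_address_t *out_addr)
{
	mach_port_t entry = MACH_PORT_NULL;
	memory_object_size_t entry_size = size;
	kern_return_t kr;

	/* MAP_MEM_NAMED_CREATE: back the entry with a fresh VM object;
	 * MAP_MEM_PURGABLE: make it purgeable (requires VM_PROT_WRITE). */
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, 0,
	    MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE | VM_PROT_READ | VM_PROT_WRITE,
	    &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*out_addr = 0;
	kr = mach_vm_map(mach_task_self(), out_addr, entry_size, 0,
	    VM_FLAGS_ANYWHERE, entry, 0, FALSE /* copy */,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);

	mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
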
+kern_return_t
+mach_make_memory_entry_internal(
+       vm_map_t                target_map,
+       memory_object_size_t    *size,
+       memory_object_offset_t  offset,
+       vm_prot_t               permission,
+       vm_named_entry_kernel_flags_t   vmne_kflags,
+       ipc_port_t              *object_handle,
+       ipc_port_t              parent_handle)
+{
+       vm_named_entry_t        parent_entry;
+       vm_named_entry_t        user_entry;
+       ipc_port_t              user_handle;
+       kern_return_t           kr;
+       vm_object_t             object;
+       vm_map_size_t           map_size;
+       vm_map_offset_t         map_start, map_end;
+
+       /*
         * Stash the offset in the page for use by vm_map_enter_mem_object()
         * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
         */
-       vm_object_offset_t      offset_in_page;
-
-       unsigned int            access;
-       vm_prot_t               protections;
-       vm_prot_t               original_protections, mask_protections;
-       unsigned int            wimg_mode;
-
-       boolean_t               force_shadow = FALSE;
-       boolean_t               use_data_addr;
-       boolean_t               use_4K_compat;
-
-       if (((permission & 0x00FF0000) &
-            ~(MAP_MEM_ONLY |
-              MAP_MEM_NAMED_CREATE |
-              MAP_MEM_GRAB_SECLUDED | /* XXX FBDP TODO: restrict usage? */
-              MAP_MEM_PURGABLE | 
-              MAP_MEM_NAMED_REUSE |
-              MAP_MEM_USE_DATA_ADDR |
-              MAP_MEM_VM_COPY |
-              MAP_MEM_4K_DATA_ADDR |
-              MAP_MEM_VM_SHARE))) {
+       vm_object_offset_t      offset_in_page;
+
+       unsigned int            access;
+       vm_prot_t               protections;
+       vm_prot_t               original_protections, mask_protections;
+       unsigned int            wimg_mode;
+       boolean_t               use_data_addr;
+       boolean_t               use_4K_compat;
+
+       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x\n", target_map, offset, *size, permission);
+
+       user_entry = NULL;
+
+       if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) {
                /*
                 * Unknown flag: reject for forward compatibility.
                 */
+               DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_VALUE);
                return KERN_INVALID_VALUE;
        }
 
-       if (parent_handle != IP_NULL &&
+       if (IP_VALID(parent_handle) &&
            ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
-               parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
+               parent_entry = (vm_named_entry_t) ip_get_kobject(parent_handle);
        } else {
                parent_entry = NULL;
        }
 
        if (parent_entry && parent_entry->is_copy) {
+               DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
                return KERN_INVALID_ARGUMENT;
        }
 
@@ -1964,68 +2563,74 @@ mach_make_memory_entry_64(
        user_handle = IP_NULL;
        user_entry = NULL;
 
-       map_start = vm_map_trunc_page(offset, PAGE_MASK);
+       map_start = vm_map_trunc_page(offset, VM_MAP_PAGE_MASK(target_map));
 
        if (permission & MAP_MEM_ONLY) {
-               boolean_t               parent_is_object;
+               boolean_t               parent_is_object;
 
 
-               map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+               map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map));
                map_size = map_end - map_start;
-               
+
                if (use_data_addr || use_4K_compat || parent_entry == NULL) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
                        return KERN_INVALID_ARGUMENT;
                }
 
-               parent_is_object = !(parent_entry->is_sub_map ||
-                                    parent_entry->is_pager);
-               object = parent_entry->backing.object;
-               if(parent_is_object && object != VM_OBJECT_NULL)
+               parent_is_object = parent_entry->is_object;
+               if (!parent_is_object) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
+                       return KERN_INVALID_ARGUMENT;
+               }
+               object = vm_named_entry_to_vm_object(parent_entry);
+               if (parent_is_object && object != VM_OBJECT_NULL) {
                        wimg_mode = object->wimg_bits;
-               else
+               } else {
                        wimg_mode = VM_WIMG_USE_DEFAULT;
-               if((access != GET_MAP_MEM(parent_entry->protection)) &&
-                               !(parent_entry->protection & VM_PROT_WRITE)) { 
+               }
+               if ((access != GET_MAP_MEM(parent_entry->protection)) &&
+                   !(parent_entry->protection & VM_PROT_WRITE)) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_RIGHT);
                        return KERN_INVALID_RIGHT;
                }
-               if(access == MAP_MEM_IO) {
-                  SET_MAP_MEM(access, parent_entry->protection);
-                  wimg_mode = VM_WIMG_IO;
-               } else if (access == MAP_MEM_COPYBACK) {
-                  SET_MAP_MEM(access, parent_entry->protection);
-                  wimg_mode = VM_WIMG_USE_DEFAULT;
-               } else if (access == MAP_MEM_INNERWBACK) {
-                  SET_MAP_MEM(access, parent_entry->protection);
-                  wimg_mode = VM_WIMG_INNERWBACK;
-               } else if (access == MAP_MEM_WTHRU) {
-                  SET_MAP_MEM(access, parent_entry->protection);
-                  wimg_mode = VM_WIMG_WTHRU;
-               } else if (access == MAP_MEM_WCOMB) {
-                  SET_MAP_MEM(access, parent_entry->protection);
-                  wimg_mode = VM_WIMG_WCOMB;
+               vm_prot_to_wimg(access, &wimg_mode);
+               if (access != MAP_MEM_NOOP) {
+                       SET_MAP_MEM(access, parent_entry->protection);
                }
                if (parent_is_object && object &&
                }
                if (parent_is_object && object &&
-                       (access != MAP_MEM_NOOP) && 
-                       (!(object->nophyscache))) {
-
+                   (access != MAP_MEM_NOOP) &&
+                   (!(object->nophyscache))) {
                        if (object->wimg_bits != wimg_mode) {
                                vm_object_lock(object);
                                vm_object_change_wimg_mode(object, wimg_mode);
                                vm_object_unlock(object);
                        }
                }
-               if (object_handle)
+               if (object_handle) {
                        *object_handle = IP_NULL;
+               }
+               DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
                return KERN_SUCCESS;
        } else if (permission & MAP_MEM_NAMED_CREATE) {
-               map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+               int     ledger_flags = 0;
+               task_t  owner;
+
+               map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map));
                map_size = map_end - map_start;
 
                if (use_data_addr || use_4K_compat) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
                        return KERN_INVALID_ARGUMENT;
                }
 
+               if (map_size == 0) {
+                       *size = 0;
+                       *object_handle = IPC_PORT_NULL;
+                       return KERN_SUCCESS;
+               }
+
                kr = mach_memory_entry_allocate(&user_entry, &user_handle);
                if (kr != KERN_SUCCESS) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE);
                        return KERN_FAILURE;
                }
 
@@ -2045,31 +2650,88 @@ mach_make_memory_entry_64(
                object = vm_object_allocate(map_size);
                assert(object != VM_OBJECT_NULL);
 
-               if (permission & MAP_MEM_PURGABLE) {
-                       if (! (permission & VM_PROT_WRITE)) {
-                               /* if we can't write, we can't purge */
-                               vm_object_deallocate(object);
-                               kr = KERN_INVALID_ARGUMENT;
-                               goto make_mem_done;
-                       }
-                       object->purgable = VM_PURGABLE_NONVOLATILE;
-                       assert(object->vo_purgeable_owner == NULL);
+               /*
+                * XXX
+                * We use this path when we want to make sure that
+                * nobody messes with the object (coalesce, for
+                * example) before we map it.
+                * We might want to use these objects for transposition via
+                * vm_object_transpose() too, so we don't want any copy or
+                * shadow objects either...
+                */
+               object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+               object->true_share = TRUE;
+
+               owner = current_task();
+               if ((permission & MAP_MEM_PURGABLE) ||
+                   vmne_kflags.vmnekf_ledger_tag) {
+                       assert(object->vo_owner == NULL);
                        assert(object->resident_page_count == 0);
                        assert(object->wired_page_count == 0);
+                       assert(owner != TASK_NULL);
+                       if (vmne_kflags.vmnekf_ledger_no_footprint) {
+                               ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
+                               object->vo_no_footprint = TRUE;
+                       }
+                       if (permission & MAP_MEM_PURGABLE) {
+                               if (!(permission & VM_PROT_WRITE)) {
+                                       /* if we can't write, we can't purge */
+                                       vm_object_deallocate(object);
+                                       kr = KERN_INVALID_ARGUMENT;
+                                       goto make_mem_done;
+                               }
+                               object->purgable = VM_PURGABLE_NONVOLATILE;
+                               if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) {
+                                       object->purgeable_only_by_kernel = TRUE;
+                               }
+#if __arm64__
+                               if (owner->task_legacy_footprint) {
+                                       /*
+                                        * For ios11, we failed to account for
+                                        * this memory.  Keep doing that for
+                                        * legacy apps (built before ios12),
+                                        * for backwards compatibility's sake...
+                                        */
+                                       owner = kernel_task;
+                               }
+#endif /* __arm64__ */
+                               vm_object_lock(object);
+                               vm_purgeable_nonvolatile_enqueue(object, owner);
+                               vm_object_unlock(object);
+                       }
+               }
+
+               if (vmne_kflags.vmnekf_ledger_tag) {
+                       /*
+                        * Bill this object to the current task's
+                        * ledgers for the given tag.
+                        */
+                       if (vmne_kflags.vmnekf_ledger_no_footprint) {
+                               ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
+                       }
                        vm_object_lock(object);
                        vm_object_lock(object);
-                       vm_purgeable_nonvolatile_enqueue(object,
-                                                        current_task());
+                       object->vo_ledger_tag = vmne_kflags.vmnekf_ledger_tag;
+                       kr = vm_object_ownership_change(
+                               object,
+                               vmne_kflags.vmnekf_ledger_tag,
+                               owner, /* new owner */
+                               ledger_flags,
+                               FALSE); /* task_objq locked? */
                        vm_object_unlock(object);
                        vm_object_unlock(object);
+                       if (kr != KERN_SUCCESS) {
+                               vm_object_deallocate(object);
+                               goto make_mem_done;
+                       }
                }
 
 #if CONFIG_SECLUDED_MEMORY
                if (secluded_for_iokit && /* global boot-arg */
                    ((permission & MAP_MEM_GRAB_SECLUDED)
 #if 11
                }
 
 #if CONFIG_SECLUDED_MEMORY
                if (secluded_for_iokit && /* global boot-arg */
                    ((permission & MAP_MEM_GRAB_SECLUDED)
 #if 11
-                    /* XXX FBDP for my testing only */
-                    || (secluded_for_fbdp && map_size == 97550336)
+                   /* XXX FBDP for my testing only */
+                   || (secluded_for_fbdp && map_size == 97550336)
 #endif
 #endif
-                           )) {
+                   )) {
 #if 11
                        if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
                            secluded_for_fbdp) {
 #if 11
                        if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
                            secluded_for_fbdp) {
@@ -2087,38 +2749,25 @@ mach_make_memory_entry_64(
                 */
 
                wimg_mode = object->wimg_bits;
                 */
 
                wimg_mode = object->wimg_bits;
-               if (access == MAP_MEM_IO) {
-                       wimg_mode = VM_WIMG_IO;
-               } else if (access == MAP_MEM_COPYBACK) {
-                       wimg_mode = VM_WIMG_USE_DEFAULT;
-               } else if (access == MAP_MEM_INNERWBACK) {
-                       wimg_mode = VM_WIMG_INNERWBACK;
-               } else if (access == MAP_MEM_WTHRU) {
-                       wimg_mode = VM_WIMG_WTHRU;
-               } else if (access == MAP_MEM_WCOMB) {
-                       wimg_mode = VM_WIMG_WCOMB;
-               }
+               vm_prot_to_wimg(access, &wimg_mode);
                if (access != MAP_MEM_NOOP) {
                        object->wimg_bits = wimg_mode;
                }
                if (access != MAP_MEM_NOOP) {
                        object->wimg_bits = wimg_mode;
                }
-               /* the object has no pages, so no WIMG bits to update here */
 
 
-               /*
-                * XXX
-                * We use this path when we want to make sure that
-                * nobody messes with the object (coalesce, for
-                * example) before we map it.
-                * We might want to use these objects for transposition via
-                * vm_object_transpose() too, so we don't want any copy or
-                * shadow objects either...
-                */
-               object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
-               object->true_share = TRUE;
+               /* the object has no pages, so no WIMG bits to update here */
 
 
-               user_entry->backing.object = object;
+               kr = vm_named_entry_from_vm_object(
+                       user_entry,
+                       object,
+                       0,
+                       map_size,
+                       (protections & VM_PROT_ALL));
+               if (kr != KERN_SUCCESS) {
+                       vm_object_deallocate(object);
+                       goto make_mem_done;
+               }
                user_entry->internal = TRUE;
                user_entry->is_sub_map = FALSE;
-               user_entry->is_pager = FALSE;
                user_entry->offset = 0;
                user_entry->data_offset = 0;
                user_entry->protection = protections;
@@ -2129,47 +2778,57 @@ mach_make_memory_entry_64(
                /* when the object field is filled in.                */
 
                *size = CAST_DOWN(vm_size_t, (user_entry->size -
-                                             user_entry->data_offset));
+                   user_entry->data_offset));
                *object_handle = user_handle;
+               DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
                return KERN_SUCCESS;
        }
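
In-kernel callers reach the ledger accounting shown above through mach_make_memory_entry_internal() and vm_named_entry_kernel_flags_t rather than a user-visible permission bit. A hedged sketch, with the ledger tag choice purely illustrative:

/* Hedged in-kernel sketch: a ledger-tagged, footprint-exempt named entry. */
static kern_return_t
make_tagged_entry(memory_object_size_t *size, ipc_port_t *entryp)
{
	vm_named_entry_kernel_flags_t kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;

	kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_NETWORK;   /* illustrative tag */
	kflags.vmnekf_ledger_no_footprint = 1;              /* bill outside phys_footprint */

	return mach_make_memory_entry_internal(current_map(), size, 0,
	    MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
	    kflags, entryp, IP_NULL);
}
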
 
        if (permission & MAP_MEM_VM_COPY) {
-               vm_map_copy_t   copy;
+               vm_map_copy_t   copy;
 
                if (target_map == VM_MAP_NULL) {
 
                if (target_map == VM_MAP_NULL) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK);
                        return KERN_INVALID_TASK;
                }
 
                        return KERN_INVALID_TASK;
                }
 
-               map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+               map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map));
                map_size = map_end - map_start;
                map_size = map_end - map_start;
+               if (map_size == 0) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
+                       return KERN_INVALID_ARGUMENT;
+               }
+
                if (use_data_addr || use_4K_compat) {
                        offset_in_page = offset - map_start;
                if (use_data_addr || use_4K_compat) {
                        offset_in_page = offset - map_start;
-                       if (use_4K_compat)
+                       if (use_4K_compat) {
                                offset_in_page &= ~((signed)(0xFFF));
                                offset_in_page &= ~((signed)(0xFFF));
+                       }
                } else {
                        offset_in_page = 0;
                }
 
                kr = vm_map_copyin_internal(target_map,
                } else {
                        offset_in_page = 0;
                }
 
                kr = vm_map_copyin_internal(target_map,
-                                           map_start,
-                                           map_size,
-                                           VM_MAP_COPYIN_ENTRY_LIST,
-                                           &copy);
+                   map_start,
+                   map_size,
+                   VM_MAP_COPYIN_ENTRY_LIST,
+                   &copy);
                if (kr != KERN_SUCCESS) {
                if (kr != KERN_SUCCESS) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
                        return kr;
                }
                        return kr;
                }
-                                  
+               assert(copy != VM_MAP_COPY_NULL);
+
                kr = mach_memory_entry_allocate(&user_entry, &user_handle);
                if (kr != KERN_SUCCESS) {
                        vm_map_copy_discard(copy);
                kr = mach_memory_entry_allocate(&user_entry, &user_handle);
                if (kr != KERN_SUCCESS) {
                        vm_map_copy_discard(copy);
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE);
                        return KERN_FAILURE;
                }
 
                user_entry->backing.copy = copy;
                user_entry->internal = FALSE;
                user_entry->is_sub_map = FALSE;
                        return KERN_FAILURE;
                }
 
                user_entry->backing.copy = copy;
                user_entry->internal = FALSE;
                user_entry->is_sub_map = FALSE;
-               user_entry->is_pager = FALSE;
                user_entry->is_copy = TRUE;
                user_entry->offset = 0;
                user_entry->protection = protections;
                user_entry->is_copy = TRUE;
                user_entry->offset = 0;
                user_entry->protection = protections;
@@ -2177,48 +2836,151 @@ mach_make_memory_entry_64(
                user_entry->data_offset = offset_in_page;
 
                *size = CAST_DOWN(vm_size_t, (user_entry->size -
                user_entry->data_offset = offset_in_page;
 
                *size = CAST_DOWN(vm_size_t, (user_entry->size -
-                                             user_entry->data_offset));
+                   user_entry->data_offset));
                *object_handle = user_handle;
                *object_handle = user_handle;
+               DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
                return KERN_SUCCESS;
        }
 
                return KERN_SUCCESS;
        }
 
-       if (permission & MAP_MEM_VM_SHARE) {
-               vm_map_copy_t   copy;
-               vm_prot_t       cur_prot, max_prot;
+       if ((permission & MAP_MEM_VM_SHARE)
+           || parent_entry == NULL
+           || (permission & MAP_MEM_NAMED_REUSE)) {
+               vm_map_copy_t   copy;
+               vm_prot_t       cur_prot, max_prot;
+               vm_map_kernel_flags_t vmk_flags;
+               vm_map_entry_t parent_copy_entry;
 
                if (target_map == VM_MAP_NULL) {
 
                if (target_map == VM_MAP_NULL) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK);
                        return KERN_INVALID_TASK;
                }
 
                        return KERN_INVALID_TASK;
                }
 
-               map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+               map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map));
+               vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+               parent_copy_entry = VM_MAP_ENTRY_NULL;
+               if (!(permission & MAP_MEM_VM_SHARE)) {
+                       vm_map_t tmp_map, real_map;
+                       vm_map_version_t version;
+                       vm_object_t tmp_object;
+                       vm_object_offset_t obj_off;
+                       vm_prot_t prot;
+                       boolean_t wired;
+                       bool contended;
+
+                       /* resolve any pending submap copy-on-write... */
+                       if (protections & VM_PROT_WRITE) {
+                               tmp_map = target_map;
+                               vm_map_lock_read(tmp_map);
+                               kr = vm_map_lookup_locked(&tmp_map,
+                                   map_start,
+                                   protections | mask_protections,
+                                   OBJECT_LOCK_EXCLUSIVE,
+                                   &version,
+                                   &tmp_object,
+                                   &obj_off,
+                                   &prot,
+                                   &wired,
+                                   NULL,                       /* fault_info */
+                                   &real_map,
+                                   &contended);
+                               if (kr != KERN_SUCCESS) {
+                                       vm_map_unlock_read(tmp_map);
+                               } else {
+                                       vm_object_unlock(tmp_object);
+                                       vm_map_unlock_read(tmp_map);
+                                       if (real_map != tmp_map) {
+                                               vm_map_unlock_read(real_map);
+                                       }
+                               }
+                       }
+                       /* ... and carry on */
+
+                       /* stop extracting if VM object changes */
+                       vmk_flags.vmkf_copy_single_object = TRUE;
+                       if ((permission & MAP_MEM_NAMED_REUSE) &&
+                           parent_entry != NULL &&
+                           parent_entry->is_object) {
+                               vm_map_copy_t parent_copy;
+                               parent_copy = parent_entry->backing.copy;
+                               assert(parent_copy->cpy_hdr.nentries == 1);
+                               parent_copy_entry = vm_map_copy_first_entry(parent_copy);
+                               assert(!parent_copy_entry->is_sub_map);
+                       }
+               }
+
                map_size = map_end - map_start;
+               if (map_size == 0) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT);
+                       return KERN_INVALID_ARGUMENT;
+               }
+
                if (use_data_addr || use_4K_compat) {
                        offset_in_page = offset - map_start;
-                       if (use_4K_compat)
+                       if (use_4K_compat) {
                                offset_in_page &= ~((signed)(0xFFF));
+                       }
                } else {
                        offset_in_page = 0;
                }
 
-               cur_prot = VM_PROT_ALL;
+               if (mask_protections) {
+                       /*
+                        * caller is asking for whichever protections are
+                        * available: no required protections.
+                        */
+                       cur_prot = VM_PROT_NONE;
+                       max_prot = VM_PROT_NONE;
+               } else {
+                       /*
+                        * Caller wants a memory entry with "protections".
+                        * Make sure we extract only memory that matches that.
+                        */
+                       cur_prot = protections;
+                       max_prot = protections;
+               }
+               if (target_map->pmap == kernel_pmap) {
+                       /*
+                        * Get "reserved" map entries to avoid deadlocking
+                        * on the kernel map or a kernel submap if we
+                        * run out of VM map entries and need to refill that
+                        * zone.
+                        */
+                       vmk_flags.vmkf_copy_pageable = FALSE;
+               } else {
+                       vmk_flags.vmkf_copy_pageable = TRUE;
+               }
+               vmk_flags.vmkf_copy_same_map = FALSE;
+               assert(map_size != 0);
                kr = vm_map_copy_extract(target_map,
-                                        map_start,
-                                        map_size,
-                                        &copy,
-                                        &cur_prot,
-                                        &max_prot);
+                   map_start,
+                   map_size,
+                   FALSE,                      /* copy */
+                   &copy,
+                   &cur_prot,
+                   &max_prot,
+                   VM_INHERIT_SHARE,
+                   vmk_flags);
                if (kr != KERN_SUCCESS) {
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
+                       if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
+//                             panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr);
+                       }
                        return kr;
                }
+               assert(copy != VM_MAP_COPY_NULL);
 
                if (mask_protections) {
                        /*
-                        * We just want as much of "original_protections" 
+                        * We just want as much of "original_protections"
                         * as we can get out of the actual "cur_prot".
                         */
                        protections &= cur_prot;
                        if (protections == VM_PROT_NONE) {
                                /* no access at all: fail */
+                               DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE);
+                               if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
+//                                     panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr);
+                               }
                                vm_map_copy_discard(copy);
                                return KERN_PROTECTION_FAILURE;
                        }
@@ -2227,618 +2989,196 @@ mach_make_memory_entry_64(
                         * We want exactly "original_protections"
                         * out of "cur_prot".
                         */
+                       assert((cur_prot & protections) == protections);
+                       assert((max_prot & protections) == protections);
+                       /* XXX FBDP TODO: no longer needed? */
                        if ((cur_prot & protections) != protections) {
                        if ((cur_prot & protections) != protections) {
+                               if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
+//                                     panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, KERN_PROTECTION_FAILURE);
+                               }
                                vm_map_copy_discard(copy);
                                vm_map_copy_discard(copy);
+                               DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE);
                                return KERN_PROTECTION_FAILURE;
                        }
                }
 
                                return KERN_PROTECTION_FAILURE;
                        }
                }
 
+               if (!(permission & MAP_MEM_VM_SHARE)) {
+                       vm_map_entry_t copy_entry;
+
+                       /* limit size to what's actually covered by "copy" */
+                       assert(copy->cpy_hdr.nentries == 1);
+                       copy_entry = vm_map_copy_first_entry(copy);
+                       map_size = copy_entry->vme_end - copy_entry->vme_start;
+
+                       if ((permission & MAP_MEM_NAMED_REUSE) &&
+                           parent_copy_entry != VM_MAP_ENTRY_NULL &&
+                           VME_OBJECT(copy_entry) == VME_OBJECT(parent_copy_entry) &&
+                           VME_OFFSET(copy_entry) == VME_OFFSET(parent_copy_entry) &&
+                           parent_entry->offset == 0 &&
+                           parent_entry->size == map_size &&
+                           (parent_entry->data_offset == offset_in_page)) {
+                               /* we have a match: re-use "parent_entry" */
+
+                               /* release our new "copy" */
+                               vm_map_copy_discard(copy);
+                               /* get extra send right on handle */
+                               ipc_port_copy_send(parent_handle);
+
+                               *size = CAST_DOWN(vm_size_t,
+                                   (parent_entry->size -
+                                   parent_entry->data_offset));
+                               *object_handle = parent_handle;
+                               DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
+                               return KERN_SUCCESS;
+                       }
+
+                       /* no match: we need to create a new entry */
+                       object = VME_OBJECT(copy_entry);
+                       vm_object_lock(object);
+                       wimg_mode = object->wimg_bits;
+                       if (!(object->nophyscache)) {
+                               vm_prot_to_wimg(access, &wimg_mode);
+                       }
+                       if (object->wimg_bits != wimg_mode) {
+                               vm_object_change_wimg_mode(object, wimg_mode);
+                       }
+                       vm_object_unlock(object);
+               }
+
                kr = mach_memory_entry_allocate(&user_entry, &user_handle);
                if (kr != KERN_SUCCESS) {
+                       if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
+//                             panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr);
+                       }
                        vm_map_copy_discard(copy);
                        vm_map_copy_discard(copy);
+                       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE);
                        return KERN_FAILURE;
                }
 
                user_entry->backing.copy = copy;
                        return KERN_FAILURE;
                }
 
                user_entry->backing.copy = copy;
-               user_entry->internal = FALSE;
                user_entry->is_sub_map = FALSE;
                user_entry->is_sub_map = FALSE;
-               user_entry->is_pager = FALSE;
-               user_entry->is_copy = TRUE;
-               user_entry->offset = 0;
+               user_entry->is_object = FALSE;
+               user_entry->internal = FALSE;
                user_entry->protection = protections;
                user_entry->size = map_size;
                user_entry->data_offset = offset_in_page;
 
                user_entry->protection = protections;
                user_entry->size = map_size;
                user_entry->data_offset = offset_in_page;
 
+               if (permission & MAP_MEM_VM_SHARE) {
+                       user_entry->is_copy = TRUE;
+                       user_entry->offset = 0;
+               } else {
+                       user_entry->is_object = TRUE;
+                       user_entry->internal = object->internal;
+                       user_entry->offset = VME_OFFSET(vm_map_copy_first_entry(copy));
+                       SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
+               }
+
                *size = CAST_DOWN(vm_size_t, (user_entry->size -
-                                             user_entry->data_offset));
+                   user_entry->data_offset));
                *object_handle = user_handle;
+               DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
                return KERN_SUCCESS;
        }
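
This branch is also the default path (no special flag, no parent entry): the requested range is extracted from the caller's map as a single-entry vm_map_copy and wrapped in a named entry, with MAP_MEM_NAMED_REUSE handing back the parent handle when it already describes the same object. A hedged user-space sketch of the classic use, sharing an existing buffer with another task whose port we already hold ("child_task" is an assumption):

#include <mach/mach.h>
#include <mach/mach_vm.h>

/* Hedged sketch: wrap [addr, addr + len) in a named entry and map it shared
 * into child_task. */
static kern_return_t
share_range_with(task_t child_task, mach_vm_address_t addr, mach_vm_size_t len,
    mach_vm_address_t *child_addr)
{
	mach_port_t entry = MACH_PORT_NULL;
	memory_object_size_t entry_size = len;
	kern_return_t kr;

	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
	    (memory_object_offset_t)addr, VM_PROT_READ | VM_PROT_WRITE,
	    &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*child_addr = 0;
	kr = mach_vm_map(child_task, child_addr, entry_size, 0,
	    VM_FLAGS_ANYWHERE, entry, 0, FALSE /* copy */,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_SHARE);

	mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
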
 
-       if (parent_entry == NULL ||
-           (permission & MAP_MEM_NAMED_REUSE)) {
+       /* The new object will be based on an existing named object */
+       if (parent_entry == NULL) {
+               kr = KERN_INVALID_ARGUMENT;
+               goto make_mem_done;
+       }
 
 
-               map_end = vm_map_round_page(offset + *size, PAGE_MASK);
-               map_size = map_end - map_start;
-               if (use_data_addr || use_4K_compat) {
-                       offset_in_page = offset - map_start;
-                       if (use_4K_compat)
-                               offset_in_page &= ~((signed)(0xFFF));
-               } else {
-                       offset_in_page = 0;
-               }
-
-               /* Create a named object based on address range within the task map */
-               /* Go find the object at given address */
-
-               if (target_map == VM_MAP_NULL) {
-                       return KERN_INVALID_TASK;
-               }
-
-redo_lookup:
-               protections = original_protections;
-               vm_map_lock_read(target_map);
-
-               /* get the object associated with the target address */
-               /* note we check the permission of the range against */
-               /* that requested by the caller */
-
-               kr = vm_map_lookup_locked(&target_map, map_start, 
-                                         protections | mask_protections,
-                                         OBJECT_LOCK_EXCLUSIVE, &version,
-                                         &object, &obj_off, &prot, &wired,
-                                         &fault_info,
-                                         &real_map);
-               if (kr != KERN_SUCCESS) {
-                       vm_map_unlock_read(target_map);
-                       goto make_mem_done;
-               }
-               if (mask_protections) {
-                       /*
-                        * The caller asked us to use the "protections" as
-                        * a mask, so restrict "protections" to what this
-                        * mapping actually allows.
-                        */
-                       protections &= prot;
-               }
-
-               if (((prot & protections) != protections) 
-                   || (object == kernel_object)) {
-                       kr = KERN_INVALID_RIGHT;
-                       vm_object_unlock(object);
-                       vm_map_unlock_read(target_map);
-                       if(real_map != target_map)
-                               vm_map_unlock_read(real_map);
-                       if(object == kernel_object) {
-                               printf("Warning: Attempt to create a named"
-                                       " entry from the kernel_object\n");
-                       }
-                       goto make_mem_done;
-               }
-
-               /* We have an object, now check to see if this object */
-               /* is suitable.  If not, create a shadow and share that */
+       if (parent_entry->is_copy) {
+               panic("parent_entry %p is_copy not supported\n", parent_entry);
+               kr = KERN_INVALID_ARGUMENT;
+               goto make_mem_done;
+       }
 
 
+       if (use_data_addr || use_4K_compat) {
                /*
-                * We have to unlock the VM object to avoid deadlocking with
-                * a VM map lock (the lock ordering is map, the object), if we
-                * need to modify the VM map to create a shadow object.  Since
-                * we might release the VM map lock below anyway, we have
-                * to release the VM map lock now.
-                * XXX FBDP There must be a way to avoid this double lookup...
-                *
-                * Take an extra reference on the VM object to make sure it's
-                * not going to disappear.
+                * submaps and pagers should only be accessible from within
+                * the kernel, which shouldn't use the data address flag, so can fail here.
                 */
                 */
-               vm_object_reference_locked(object); /* extra ref to hold obj */
-               vm_object_unlock(object);
-
-               local_map = original_map;
-               local_offset = map_start;
-               if(target_map != local_map) {
-                       vm_map_unlock_read(target_map);
-                       if(real_map != target_map)
-                               vm_map_unlock_read(real_map);
-                       vm_map_lock_read(local_map);
-                       target_map = local_map;
-                       real_map = local_map;
-               }
-               while(TRUE) {
-                  if(!vm_map_lookup_entry(local_map, 
-                                               local_offset, &map_entry)) {
-                       kr = KERN_INVALID_ARGUMENT;
-                        vm_map_unlock_read(target_map);
-                       if(real_map != target_map)
-                               vm_map_unlock_read(real_map);
-                        vm_object_deallocate(object); /* release extra ref */
-                       object = VM_OBJECT_NULL;
-                        goto make_mem_done;
-                  }
-                  iskernel = (local_map->pmap == kernel_pmap);
-                  if(!(map_entry->is_sub_map)) {
-                     if (VME_OBJECT(map_entry) != object) {
-                        kr = KERN_INVALID_ARGUMENT;
-                         vm_map_unlock_read(target_map);
-                        if(real_map != target_map)
-                               vm_map_unlock_read(real_map);
-                         vm_object_deallocate(object); /* release extra ref */
-                        object = VM_OBJECT_NULL;
-                         goto make_mem_done;
-                     }
-                     break;
-                  } else {
-                       vm_map_t        tmap;
-                       tmap = local_map;
-                       local_map = VME_SUBMAP(map_entry);
-                       
-                       vm_map_lock_read(local_map);
-                       vm_map_unlock_read(tmap);
-                       target_map = local_map;
-                       real_map = local_map;
-                       local_offset = local_offset - map_entry->vme_start;
-                       local_offset += VME_OFFSET(map_entry);
-                  }
+               if (parent_entry->is_sub_map) {
+                       panic("Shouldn't be using data address with a parent entry that is a submap.");
                }
                }
-
                /*
                /*
-                * We found the VM map entry, lock the VM object again.
+                * Account for offset to data in parent entry and
+                * compute our own offset to data.
                 */
                 */
-               vm_object_lock(object);
-               if(map_entry->wired_count) {
-                        /* JMM - The check below should be reworked instead. */
-                        object->true_share = TRUE;
-                     }
-               if (mask_protections) {
-                       /*
-                        * The caller asked us to use the "protections" as
-                        * a mask, so restrict "protections" to what this
-                        * mapping actually allows.
-                        */
-                       protections &= map_entry->max_protection;
-               }
-               if(((map_entry->max_protection) & protections) != protections) {
-                        kr = KERN_INVALID_RIGHT;
-                         vm_object_unlock(object);
-                         vm_map_unlock_read(target_map);
-                        if(real_map != target_map)
-                               vm_map_unlock_read(real_map);
-                        vm_object_deallocate(object);
-                        object = VM_OBJECT_NULL;
-                         goto make_mem_done;
-               }
-
-               mappable_size = fault_info.hi_offset - obj_off;
-               total_size = map_entry->vme_end - map_entry->vme_start;
-               if(map_size > mappable_size) {
-                       /* try to extend mappable size if the entries */
-                       /* following are from the same object and are */
-                       /* compatible */
-                       next_entry = map_entry->vme_next;
-                       /* lets see if the next map entry is still   */
-                       /* pointing at this object and is contiguous */
-                       while(map_size > mappable_size) {
-                               if ((VME_OBJECT(next_entry) == object) &&
-                                   (next_entry->vme_start == 
-                                    next_entry->vme_prev->vme_end) &&
-                                   (VME_OFFSET(next_entry) == 
-                                    (VME_OFFSET(next_entry->vme_prev) + 
-                                     (next_entry->vme_prev->vme_end - 
-                                      next_entry->vme_prev->vme_start)))) {
-                                       if (mask_protections) {
-                                               /*
-                                                * The caller asked us to use
-                                                * the "protections" as a mask,
-                                                * so restrict "protections" to
-                                                * what this mapping actually
-                                                * allows.
-                                                */
-                                               protections &= next_entry->max_protection;
-                                       }
-                                       if ((next_entry->wired_count) &&
-                                           (map_entry->wired_count == 0)) {
-                                               break;
-                                       }
-                                       if(((next_entry->max_protection) 
-                                               & protections) != protections) {
-                                               break;
-                                       }
-                                       if (next_entry->needs_copy !=
-                                           map_entry->needs_copy)
-                                               break;
-                                       mappable_size += next_entry->vme_end
-                                               - next_entry->vme_start;
-                                       total_size += next_entry->vme_end
-                                               - next_entry->vme_start;
-                                       next_entry = next_entry->vme_next;
-                               } else {
-                                       break;
-                               }
-                       
-                       }
+               if ((offset + *size + parent_entry->data_offset) > parent_entry->size) {
+                       kr = KERN_INVALID_ARGUMENT;
+                       goto make_mem_done;
                }
 
-               /* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
-                * never true in kernel */ 
-               if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) &&
-                   object->vo_size > map_size &&
-                   map_size != 0) {
-                       /*
-                        * Set up the targeted range for copy-on-write to
-                        * limit the impact of "true_share"/"copy_delay" to
-                        * that range instead of the entire VM object...
-                        */
-                       
-                       vm_object_unlock(object);
-                       if (vm_map_lock_read_to_write(target_map)) {
-                               vm_object_deallocate(object);
-                               target_map = original_map;
-                               goto redo_lookup;
-                       }
-
-                       vm_map_clip_start(target_map,
-                                         map_entry,
-                                         vm_map_trunc_page(map_start,
-                                                           VM_MAP_PAGE_MASK(target_map)));
-                       vm_map_clip_end(target_map,
-                                       map_entry,
-                                       (vm_map_round_page(map_end,
-                                                          VM_MAP_PAGE_MASK(target_map))));
-                       force_shadow = TRUE;
-
-                       if ((map_entry->vme_end - offset) < map_size) {
-                               map_size = map_entry->vme_end - map_start;
-                       }
-                       total_size = map_entry->vme_end - map_entry->vme_start;
-
-                       vm_map_lock_write_to_read(target_map);
-                       vm_object_lock(object);
+               map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
+               offset_in_page = (offset + parent_entry->data_offset) - map_start;
+               if (use_4K_compat) {
+                       offset_in_page &= ~((signed)(0xFFF));
                }
+               map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
+               map_size = map_end - map_start;
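Illustrative arithmetic for the data-address path above (an added example, assuming 4 KB pages so PAGE_MASK is 0xFFF): with offset 0x1234, *size 0x2000 and a parent data_offset of 0, map_start truncates to 0x1000, offset_in_page becomes 0x234, map_end rounds up to 0x4000, and map_size is therefore 0x3000.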
+       } else {
+               map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+               map_size = map_end - map_start;
+               offset_in_page = 0;
 
-               if (object->internal) {
-                       /* vm_map_lookup_locked will create a shadow if   */
-                       /* needs_copy is set but does not check for the   */
-                       /* other two conditions shown. It is important to */ 
-                       /* set up an object which will not be pulled from */
-                       /* under us.  */
-
-                       if (force_shadow ||
-                           ((map_entry->needs_copy  ||
-                             object->shadowed ||
-                             (object->vo_size > total_size &&
-                              (VME_OFFSET(map_entry) != 0 ||
-                               object->vo_size >
-                               vm_map_round_page(total_size,
-                                                 VM_MAP_PAGE_MASK(target_map)))))
-                            && !object->true_share)) {
-                               /*
-                                * We have to unlock the VM object before
-                                * trying to upgrade the VM map lock, to
-                                * honor lock ordering (map then object).
-                                * Otherwise, we would deadlock if another
-                                * thread holds a read lock on the VM map and
-                                * is trying to acquire the VM object's lock.
-                                * We still hold an extra reference on the
-                                * VM object, guaranteeing that it won't
-                                * disappear.
-                                */
-                               vm_object_unlock(object);
-
-                               if (vm_map_lock_read_to_write(target_map)) {
-                                       /*
-                                        * We couldn't upgrade our VM map lock
-                                        * from "read" to "write" and we lost
-                                        * our "read" lock.
-                                        * Start all over again...
-                                        */
-                                       vm_object_deallocate(object); /* extra ref */
-                                       target_map = original_map;
-                                       goto redo_lookup;
-                               }
-#if 00
-                               vm_object_lock(object);
-#endif
-
-                               /* 
-                                * JMM - We need to avoid coming here when the object
-                                * is wired by anybody, not just the current map.  Why
-                                * couldn't we use the standard vm_object_copy_quickly()
-                                * approach here?
-                                */
-                                
-                               /* create a shadow object */
-                               VME_OBJECT_SHADOW(map_entry, total_size);
-                               shadow_object = VME_OBJECT(map_entry);
-#if 00
-                               vm_object_unlock(object);
-#endif
-
-                               prot = map_entry->protection & ~VM_PROT_WRITE;
-
-                               if (override_nx(target_map,
-                                               VME_ALIAS(map_entry))
-                                   && prot)
-                                       prot |= VM_PROT_EXECUTE;
-
-                               vm_object_pmap_protect(
-                                       object, VME_OFFSET(map_entry),
-                                       total_size,
-                                       ((map_entry->is_shared 
-                                         || target_map->mapped_in_other_pmaps)
-                                                       ? PMAP_NULL :
-                                                       target_map->pmap),
-                                       map_entry->vme_start,
-                                       prot);
-                               total_size -= (map_entry->vme_end 
-                                               - map_entry->vme_start);
-                               next_entry = map_entry->vme_next;
-                               map_entry->needs_copy = FALSE;
-
-                               vm_object_lock(shadow_object);
-                               while (total_size) {
-                                   assert((next_entry->wired_count == 0) ||
-                                          (map_entry->wired_count));
-
-                                   if (VME_OBJECT(next_entry) == object) {
-                                       vm_object_reference_locked(shadow_object);
-                                       VME_OBJECT_SET(next_entry,
-                                                      shadow_object);
-                                       vm_object_deallocate(object);
-                                       VME_OFFSET_SET(
-                                               next_entry,
-                                               (VME_OFFSET(next_entry->vme_prev) +
-                                                (next_entry->vme_prev->vme_end 
-                                                 - next_entry->vme_prev->vme_start)));
-                                               next_entry->needs_copy = FALSE;
-                                       } else {
-                                               panic("mach_make_memory_entry_64:"
-                                                 " map entries out of sync\n");
-                                       }
-                                       total_size -= 
-                                               next_entry->vme_end 
-                                                       - next_entry->vme_start;
-                                       next_entry = next_entry->vme_next;
-                               }
-
-                               /*
-                                * Transfer our extra reference to the
-                                * shadow object.
-                                */
-                               vm_object_reference_locked(shadow_object);
-                               vm_object_deallocate(object); /* extra ref */
-                               object = shadow_object;
-
-                               obj_off = ((local_offset - map_entry->vme_start)
-                                          + VME_OFFSET(map_entry));
-
-                               vm_map_lock_write_to_read(target_map);
-                       }
-               }
-
-               /* note: in the future we can (if necessary) allow for  */
-               /* memory object lists, this will better support        */
-               /* fragmentation, but is it necessary?  The user should */
-               /* be encouraged to create address space oriented       */
-               /* shared objects from CLEAN memory regions which have  */
-               /* a known and defined history.  i.e. no inheritence    */
-               /* share, make this call before making the region the   */
-               /* target of ipc's, etc.  The code above, protecting    */
-               /* against delayed copy, etc. is mostly defensive.      */
-
-               wimg_mode = object->wimg_bits;
-               if(!(object->nophyscache)) {
-                       if(access == MAP_MEM_IO) {
-                               wimg_mode = VM_WIMG_IO;
-                       } else if (access == MAP_MEM_COPYBACK) {
-                               wimg_mode = VM_WIMG_USE_DEFAULT;
-                       } else if (access == MAP_MEM_INNERWBACK) {
-                               wimg_mode = VM_WIMG_INNERWBACK;
-                       } else if (access == MAP_MEM_WTHRU) {
-                               wimg_mode = VM_WIMG_WTHRU;
-                       } else if (access == MAP_MEM_WCOMB) {
-                               wimg_mode = VM_WIMG_WCOMB;
-                       }
-               }
-
-#if VM_OBJECT_TRACKING_OP_TRUESHARE
-               if (!object->true_share &&
-                   vm_object_tracking_inited) {
-                       void *bt[VM_OBJECT_TRACKING_BTDEPTH];
-                       int num = 0;
-
-                       num = OSBacktrace(bt,
-                                         VM_OBJECT_TRACKING_BTDEPTH);
-                       btlog_add_entry(vm_object_tracking_btlog,
-                                       object,
-                                       VM_OBJECT_TRACKING_OP_TRUESHARE,
-                                       bt,
-                                       num);
+               if ((offset + map_size) > parent_entry->size) {
+                       kr = KERN_INVALID_ARGUMENT;
+                       goto make_mem_done;
                }
-#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
-
-               vm_object_lock_assert_exclusive(object);
-               object->true_share = TRUE;
-               if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
-                       object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+       }
 
+       if (mask_protections) {
                /*
-                * The memory entry now points to this VM object and we
-                * need to hold a reference on the VM object.  Use the extra
-                * reference we took earlier to keep the object alive when we
-                * had to unlock it.
+                * The caller asked us to use the "protections" as
+                * a mask, so restrict "protections" to what this
+                * mapping actually allows.
                 */
+               protections &= parent_entry->protection;
+       }
+       if ((protections & parent_entry->protection) != protections) {
+               kr = KERN_PROTECTION_FAILURE;
+               goto make_mem_done;
+       }
 
-               vm_map_unlock_read(target_map);
-               if(real_map != target_map)
-                       vm_map_unlock_read(real_map);
-
-               if (object->wimg_bits != wimg_mode)
-                       vm_object_change_wimg_mode(object, wimg_mode);
-
-               /* the size of mapped entry that overlaps with our region */
-               /* which is targeted for share.                           */
-               /* (entry_end - entry_start) -                            */
-               /*                   offset of our beg addr within entry  */
-               /* it corresponds to this:                                */
-
-               if(map_size > mappable_size)
-                       map_size = mappable_size;
-
-               if (permission & MAP_MEM_NAMED_REUSE) {
-                       /*
-                        * Compare what we got with the "parent_entry".
-                        * If they match, re-use the "parent_entry" instead
-                        * of creating a new one.
-                        */
-                       if (parent_entry != NULL &&
-                           parent_entry->backing.object == object &&
-                           parent_entry->internal == object->internal &&
-                           parent_entry->is_sub_map == FALSE &&
-                           parent_entry->is_pager == FALSE &&
-                           parent_entry->offset == obj_off &&
-                           parent_entry->protection == protections &&
-                           parent_entry->size == map_size &&
-                           ((!(use_data_addr || use_4K_compat) &&
-                             (parent_entry->data_offset == 0)) ||  
-                            ((use_data_addr || use_4K_compat) &&
-                             (parent_entry->data_offset == offset_in_page)))) {
-                               /*
-                                * We have a match: re-use "parent_entry".
-                                */
-                               /* release our extra reference on object */
-                               vm_object_unlock(object);
-                               vm_object_deallocate(object);
-                               /* parent_entry->ref_count++; XXX ? */
-                               /* Get an extra send-right on handle */
-                               ipc_port_copy_send(parent_handle);
-
-                               *size = CAST_DOWN(vm_size_t,
-                                                 (parent_entry->size -
-                                                  parent_entry->data_offset));
-                               *object_handle = parent_handle;
-                               return KERN_SUCCESS;
-                       } else {
-                               /*
-                                * No match: we need to create a new entry.
-                                * fall through...
-                                */
-                       }
-               }
-
-               vm_object_unlock(object);
-               if (mach_memory_entry_allocate(&user_entry, &user_handle)
-                   != KERN_SUCCESS) {
-                       /* release our unused reference on the object */
-                       vm_object_deallocate(object);
-                       return KERN_FAILURE;
-               }
-
-               user_entry->backing.object = object;
-               user_entry->internal = object->internal;
-               user_entry->is_sub_map = FALSE;
-               user_entry->is_pager = FALSE;
-               user_entry->offset = obj_off;
-               user_entry->data_offset = offset_in_page;
-               user_entry->protection = protections;
-               SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
-               user_entry->size = map_size;
+       if (mach_memory_entry_allocate(&user_entry, &user_handle)
+           != KERN_SUCCESS) {
+               kr = KERN_FAILURE;
+               goto make_mem_done;
+       }
 
-               /* user_object pager and internal fields are not used */
-               /* when the object field is filled in.                */
+       user_entry->size = map_size;
+       user_entry->offset = parent_entry->offset + map_start;
+       user_entry->data_offset = offset_in_page;
+       user_entry->is_sub_map = parent_entry->is_sub_map;
+       user_entry->is_copy = parent_entry->is_copy;
+       user_entry->internal = parent_entry->internal;
+       user_entry->protection = protections;
 
-               *size = CAST_DOWN(vm_size_t, (user_entry->size -
-                                             user_entry->data_offset));
-               *object_handle = user_handle;
-               return KERN_SUCCESS;
+       if (access != MAP_MEM_NOOP) {
+               SET_MAP_MEM(access, user_entry->protection);
+       }
 
+       if (parent_entry->is_sub_map) {
+               vm_map_t map = parent_entry->backing.map;
+               vm_map_reference(map);
+               user_entry->backing.map = map;
        } else {
-               /* The new object will be base on an existing named object */
-               if (parent_entry == NULL) {
-                       kr = KERN_INVALID_ARGUMENT;
-                       goto make_mem_done;
-               }
-
-               if (use_data_addr || use_4K_compat) {
-                       /*
-                        * submaps and pagers should only be accessible from within
-                        * the kernel, which shouldn't use the data address flag, so can fail here.
-                        */
-                       if (parent_entry->is_pager || parent_entry->is_sub_map) {
-                               panic("Shouldn't be using data address with a parent entry that is a submap or pager.");
-                       }
-                       /*
-                        * Account for offset to data in parent entry and
-                        * compute our own offset to data.
-                        */
-                       if((offset + *size + parent_entry->data_offset) > parent_entry->size) {
-                               kr = KERN_INVALID_ARGUMENT;
-                               goto make_mem_done;
-                       }
-
-                       map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
-                       offset_in_page = (offset + parent_entry->data_offset) - map_start;
-                       if (use_4K_compat)
-                               offset_in_page &= ~((signed)(0xFFF));
-                       map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
-                       map_size = map_end - map_start;
-               } else {
-                       map_end = vm_map_round_page(offset + *size, PAGE_MASK);
-                       map_size = map_end - map_start;
-                       offset_in_page = 0;
-
-                       if((offset + map_size) > parent_entry->size) {
-                               kr = KERN_INVALID_ARGUMENT;
-                               goto make_mem_done;
-                       }
-               }
-
-               if (mask_protections) {
-                       /*
-                        * The caller asked us to use the "protections" as
-                        * a mask, so restrict "protections" to what this
-                        * mapping actually allows.
-                        */
-                       protections &= parent_entry->protection;
-               }
-               if((protections & parent_entry->protection) != protections) {
-                       kr = KERN_PROTECTION_FAILURE;
-                       goto make_mem_done;
-               }
-
-               if (mach_memory_entry_allocate(&user_entry, &user_handle)
-                   != KERN_SUCCESS) {
-                       kr = KERN_FAILURE;
+               object = vm_named_entry_to_vm_object(parent_entry);
+               assert(object != VM_OBJECT_NULL);
+               assert(object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC);
+               kr = vm_named_entry_from_vm_object(
+                       user_entry,
+                       object,
+                       user_entry->offset,
+                       user_entry->size,
+                       (user_entry->protection & VM_PROT_ALL));
+               if (kr != KERN_SUCCESS) {
                        goto make_mem_done;
                }
-
-               user_entry->size = map_size;
-               user_entry->offset = parent_entry->offset + map_start;
-               user_entry->data_offset = offset_in_page; 
-               user_entry->is_sub_map = parent_entry->is_sub_map;
-               user_entry->is_pager = parent_entry->is_pager;
-               user_entry->is_copy = parent_entry->is_copy;
-               user_entry->internal = parent_entry->internal;
-               user_entry->protection = protections;
-
-               if(access != MAP_MEM_NOOP) {
-                  SET_MAP_MEM(access, user_entry->protection);
-               }
-
-               if(parent_entry->is_sub_map) {
-                  user_entry->backing.map = parent_entry->backing.map;
-                  vm_map_lock(user_entry->backing.map);
-                  user_entry->backing.map->ref_count++;
-                  vm_map_unlock(user_entry->backing.map);
-               }
-               else if (parent_entry->is_pager) {
-                  user_entry->backing.pager = parent_entry->backing.pager;
-                  /* JMM - don't we need a reference here? */
-               } else {
-                  object = parent_entry->backing.object;
-                  assert(object != VM_OBJECT_NULL);
-                  user_entry->backing.object = object;
-                  /* we now point to this object, hold on */
-                  vm_object_lock(object);
-                  vm_object_reference_locked(object); 
+               assert(user_entry->is_object);
+               /* we now point to this object, hold on */
+               vm_object_lock(object);
+               vm_object_reference_locked(object);
 #if VM_OBJECT_TRACKING_OP_TRUESHARE
                if (!object->true_share &&
                    vm_object_tracking_inited) {
@@ -2846,25 +3186,26 @@ redo_lookup:
                        int num = 0;
 
                        num = OSBacktrace(bt,
-                                         VM_OBJECT_TRACKING_BTDEPTH);
+                           VM_OBJECT_TRACKING_BTDEPTH);
                        btlog_add_entry(vm_object_tracking_btlog,
                        btlog_add_entry(vm_object_tracking_btlog,
-                                       object,
-                                       VM_OBJECT_TRACKING_OP_TRUESHARE,
-                                       bt,
-                                       num);
+                           object,
+                           VM_OBJECT_TRACKING_OP_TRUESHARE,
+                           bt,
+                           num);
                }
 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
 
-                  object->true_share = TRUE;
-                  if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
+               object->true_share = TRUE;
+               if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
                        object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
-                  vm_object_unlock(object);
                }
-               *size = CAST_DOWN(vm_size_t, (user_entry->size -
-                                             user_entry->data_offset));
-               *object_handle = user_handle;
-               return KERN_SUCCESS;
+               vm_object_unlock(object);
        }
+       *size = CAST_DOWN(vm_size_t, (user_entry->size -
+           user_entry->data_offset));
+       *object_handle = user_handle;
+       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS);
+       return KERN_SUCCESS;
 
 make_mem_done:
        if (user_handle != IP_NULL) {
@@ -2875,45 +3216,46 @@ make_mem_done:
                 */
                mach_memory_entry_port_release(user_handle);
        }
+       DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
        return kr;
 }
 
 kern_return_t
 _mach_make_memory_entry(
-       vm_map_t                target_map,
-       memory_object_size_t    *size,
-       memory_object_offset_t  offset,
-       vm_prot_t               permission,
-       ipc_port_t              *object_handle,
-       ipc_port_t              parent_entry)
-{
-       memory_object_size_t    mo_size;
-       kern_return_t           kr;
-       
+       vm_map_t                target_map,
+       memory_object_size_t    *size,
+       memory_object_offset_t  offset,
+       vm_prot_t               permission,
+       ipc_port_t              *object_handle,
+       ipc_port_t              parent_entry)
+{
+       memory_object_size_t    mo_size;
+       kern_return_t           kr;
+
        mo_size = (memory_object_size_t)*size;
-       kr = mach_make_memory_entry_64(target_map, &mo_size, 
-                       (memory_object_offset_t)offset, permission, object_handle,
-                       parent_entry);
+       kr = mach_make_memory_entry_64(target_map, &mo_size,
+           (memory_object_offset_t)offset, permission, object_handle,
+           parent_entry);
        *size = mo_size;
        return kr;
 }
 
 kern_return_t
 mach_make_memory_entry(
-       vm_map_t                target_map,
-       vm_size_t               *size,
-       vm_offset_t             offset,
-       vm_prot_t               permission,
-       ipc_port_t              *object_handle,
-       ipc_port_t              parent_entry)
-{      
-       memory_object_size_t    mo_size;
-       kern_return_t           kr;
-       
+       vm_map_t                target_map,
+       vm_size_t               *size,
+       vm_offset_t             offset,
+       vm_prot_t               permission,
+       ipc_port_t              *object_handle,
+       ipc_port_t              parent_entry)
+{
+       memory_object_size_t    mo_size;
+       kern_return_t           kr;
+
        mo_size = (memory_object_size_t)*size;
-       kr = mach_make_memory_entry_64(target_map, &mo_size, 
-                       (memory_object_offset_t)offset, permission, object_handle,
-                       parent_entry);
+       kr = mach_make_memory_entry_64(target_map, &mo_size,
+           (memory_object_offset_t)offset, permission, object_handle,
+           parent_entry);
        *size = CAST_DOWN(vm_size_t, mo_size);
        return kr;
 }
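Both wrappers above reduce to mach_make_memory_entry_64(). A minimal user-space sketch of the usual share-and-remap pattern follows (illustrative only; it assumes the standard <mach/mach.h> and <mach/mach_vm.h> interfaces and trims error cleanup):

#include <mach/mach.h>
#include <mach/mach_vm.h>

/* Map the same 16 KB of memory at a second address via a named entry. */
static kern_return_t
share_buffer_example(void)
{
	mach_vm_address_t addr = 0, alias = 0;
	mach_vm_size_t size = 16 * 1024;
	memory_object_size_t entry_size = size;
	mach_port_t entry = MACH_PORT_NULL;
	kern_return_t kr;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* Create a named entry covering [addr, addr + size). */
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, addr,
	    VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* Map the same pages at a kernel-chosen second address. */
	kr = mach_vm_map(mach_task_self(), &alias, size, 0, VM_FLAGS_ANYWHERE,
	    entry, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
	mach_port_deallocate(mach_task_self(), entry);
	return kr;
}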
@@ -2928,61 +3270,61 @@ mach_make_memory_entry(
  */
 kern_return_t
 task_wire(
-       vm_map_t        map,
-       boolean_t       must_wire)
+       vm_map_t        map,
+       boolean_t       must_wire)
+{
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       vm_map_lock(map);
+       map->wiring_required = (must_wire == TRUE);
+       vm_map_unlock(map);
+
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+vm_map_exec_lockdown(
+       vm_map_t        map)
 {
-       if (map == VM_MAP_NULL)
-               return(KERN_INVALID_ARGUMENT);
+       if (map == VM_MAP_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (must_wire)
-               map->wiring_required = TRUE;
-       else
-               map->wiring_required = FALSE;
+       vm_map_lock(map);
+       map->map_disallow_new_exec = TRUE;
+       vm_map_unlock(map);
 
-       return(KERN_SUCCESS);
+       return KERN_SUCCESS;
 }
 
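For the two routines above, a hedged user-space sketch; it assumes the MIG-generated task_wire() prototype from <mach/mach.h> is reachable from user space (vm_map_exec_lockdown() is kernel-internal):

#include <mach/mach.h>

/* Ask that every subsequent allocation in this task's map be wired.
 * Illustrative only; the call may be refused on current systems. */
static kern_return_t
require_wired_allocations(void)
{
	return task_wire(mach_task_self(), TRUE);
}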
+#if VM_NAMED_ENTRY_LIST
+queue_head_t    vm_named_entry_list = QUEUE_HEAD_INITIALIZER(vm_named_entry_list);
+int             vm_named_entry_count = 0;
+LCK_MTX_EARLY_DECLARE_ATTR(vm_named_entry_list_lock_data,
+    &vm_object_lck_grp, &vm_object_lck_attr);
+#endif /* VM_NAMED_ENTRY_LIST */
+
 __private_extern__ kern_return_t
 mach_memory_entry_allocate(
-       vm_named_entry_t        *user_entry_p,
-       ipc_port_t              *user_handle_p)
+       vm_named_entry_t        *user_entry_p,
+       ipc_port_t              *user_handle_p)
 {
-       vm_named_entry_t        user_entry;
-       ipc_port_t              user_handle;
-       ipc_port_t              previous;
+       vm_named_entry_t        user_entry;
+       ipc_port_t              user_handle;
 
        user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
-       if (user_entry == NULL)
-               return KERN_FAILURE;
-
-       named_entry_lock_init(user_entry);
-
-       user_handle = ipc_port_alloc_kernel();
-       if (user_handle == IP_NULL) {
-               kfree(user_entry, sizeof *user_entry);
+       if (user_entry == NULL) {
                return KERN_FAILURE;
        }
-       ip_lock(user_handle);
-
-       /* make a sonce right */
-       user_handle->ip_sorights++;
-       ip_reference(user_handle);
+       bzero(user_entry, sizeof(*user_entry));
 
-       user_handle->ip_destination = IP_NULL;
-       user_handle->ip_receiver_name = MACH_PORT_NULL;
-       user_handle->ip_receiver = ipc_space_kernel;
-
-       /* make a send right */
-        user_handle->ip_mscount++;
-        user_handle->ip_srights++;
-        ip_reference(user_handle);
-
-       ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
-       /* nsrequest unlocks user_handle */
+       named_entry_lock_init(user_entry);
 
-       user_entry->backing.pager = NULL;
+       user_entry->backing.copy = NULL;
+       user_entry->is_object = FALSE;
        user_entry->is_sub_map = FALSE;
-       user_entry->is_pager = FALSE;
        user_entry->is_copy = FALSE;
        user_entry->internal = FALSE;
        user_entry->size = 0;
@@ -2991,12 +3333,28 @@ mach_memory_entry_allocate(
        user_entry->protection = VM_PROT_NONE;
        user_entry->ref_count = 1;
 
-       ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
-                       IKOT_NAMED_ENTRY);
+       user_handle = ipc_kobject_alloc_port((ipc_kobject_t)user_entry,
+           IKOT_NAMED_ENTRY,
+           IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
 
        *user_entry_p = user_entry;
        *user_handle_p = user_handle;
 
+#if VM_NAMED_ENTRY_LIST
+       /* keep a loose (no reference) pointer to the Mach port, for debugging only */
+       user_entry->named_entry_port = user_handle;
+       /* backtrace at allocation time, for debugging only */
+       OSBacktrace(&user_entry->named_entry_bt[0],
+           NAMED_ENTRY_BT_DEPTH);
+
+       /* add this new named entry to the global list */
+       lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
+       queue_enter(&vm_named_entry_list, user_entry,
+           vm_named_entry_t, named_entry_list);
+       vm_named_entry_count++;
+       lck_mtx_unlock(&vm_named_entry_list_lock_data);
+#endif /* VM_NAMED_ENTRY_LIST */
+
        return KERN_SUCCESS;
 }
 
@@ -3005,94 +3363,134 @@ mach_memory_entry_allocate(
  *
  *     Create a named entry backed by the provided pager.
  *
- *     JMM - we need to hold a reference on the pager -
- *     and release it when the named entry is destroyed.
  */
 kern_return_t
 mach_memory_object_memory_entry_64(
-       host_t                  host,
-       boolean_t               internal,
-       vm_object_offset_t      size,
-       vm_prot_t               permission,
-       memory_object_t         pager,
-       ipc_port_t              *entry_handle)
+       host_t                  host,
+       boolean_t               internal,
+       vm_object_offset_t      size,
+       vm_prot_t               permission,
+       memory_object_t         pager,
+       ipc_port_t              *entry_handle)
 {
-       unsigned int            access;
-       vm_named_entry_t        user_entry;
-       ipc_port_t              user_handle;
+       unsigned int            access;
+       vm_named_entry_t        user_entry;
+       ipc_port_t              user_handle;
+       vm_object_t             object;
+       kern_return_t           kr;
 
-        if (host == HOST_NULL)
-                return(KERN_INVALID_HOST);
+       if (host == HOST_NULL) {
+               return KERN_INVALID_HOST;
+       }
+
+       if (pager == MEMORY_OBJECT_NULL && internal) {
+               object = vm_object_allocate(size);
+               if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
+                       object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+               }
+       } else {
+               object = memory_object_to_vm_object(pager);
+               if (object != VM_OBJECT_NULL) {
+                       vm_object_reference(object);
+               }
+       }
+       if (object == VM_OBJECT_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
        if (mach_memory_entry_allocate(&user_entry, &user_handle)
            != KERN_SUCCESS) {
+               vm_object_deallocate(object);
                return KERN_FAILURE;
        }
 
-       user_entry->backing.pager = pager;
        user_entry->size = size;
        user_entry->offset = 0;
        user_entry->protection = permission & VM_PROT_ALL;
        access = GET_MAP_MEM(permission);
        SET_MAP_MEM(access, user_entry->protection);
-       user_entry->internal = internal;
        user_entry->is_sub_map = FALSE;
-       user_entry->is_pager = TRUE;
        assert(user_entry->ref_count == 1);
 
+       kr = vm_named_entry_from_vm_object(user_entry, object, 0, size,
+           (user_entry->protection & VM_PROT_ALL));
+       if (kr != KERN_SUCCESS) {
+               return kr;
+       }
+       user_entry->internal = object->internal;
+       assert(object->internal == internal);
+
        *entry_handle = user_handle;
        return KERN_SUCCESS;
-}      
+}
 
 kern_return_t
 mach_memory_object_memory_entry(
-       host_t          host,
-       boolean_t       internal,
-       vm_size_t       size,
-       vm_prot_t       permission,
-       memory_object_t pager,
-       ipc_port_t      *entry_handle)
+       host_t          host,
+       boolean_t       internal,
+       vm_size_t       size,
+       vm_prot_t       permission,
+       memory_object_t pager,
+       ipc_port_t      *entry_handle)
 {
 {
-       return mach_memory_object_memory_entry_64( host, internal, 
-               (vm_object_offset_t)size, permission, pager, entry_handle);
+       return mach_memory_object_memory_entry_64( host, internal,
+                  (vm_object_offset_t)size, permission, pager, entry_handle);
 }
 
 
 kern_return_t
 mach_memory_entry_purgable_control(
-       ipc_port_t      entry_port,
-       vm_purgable_t   control,
-       int             *state)
+       ipc_port_t      entry_port,
+       vm_purgable_t   control,
+       int             *state)
 {
-       kern_return_t           kr;
-       vm_named_entry_t        mem_entry;
-       vm_object_t             object;
+       if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+               /* not allowed from user-space */
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       return memory_entry_purgeable_control_internal(entry_port, control, state);
+}
 
-       if (entry_port == IP_NULL ||
+kern_return_t
+memory_entry_purgeable_control_internal(
+       ipc_port_t      entry_port,
+       vm_purgable_t   control,
+       int             *state)
+{
+       kern_return_t           kr;
+       vm_named_entry_t        mem_entry;
+       vm_object_t             object;
+
+       if (!IP_VALID(entry_port) ||
            ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
                return KERN_INVALID_ARGUMENT;
        }
        if (control != VM_PURGABLE_SET_STATE &&
-           control != VM_PURGABLE_GET_STATE)
-               return(KERN_INVALID_ARGUMENT);
+           control != VM_PURGABLE_GET_STATE &&
+           control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (control == VM_PURGABLE_SET_STATE &&
+       if ((control == VM_PURGABLE_SET_STATE ||
+           control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
            (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
            (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
-               return(KERN_INVALID_ARGUMENT);
+           ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
+       mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
 
        named_entry_lock(mem_entry);
 
        if (mem_entry->is_sub_map ||
-           mem_entry->is_pager ||
            mem_entry->is_copy) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        }
 
-       object = mem_entry->backing.object;
+       assert(mem_entry->is_object);
+       object = vm_named_entry_to_vm_object(mem_entry);
        if (object == VM_OBJECT_NULL) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
@@ -3116,35 +3514,182 @@ mach_memory_entry_purgable_control(
        return kr;
 }
 
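Before the new routines that follow, a hedged user-space sketch of the purgeable path that mach_memory_entry_purgable_control() serves; it assumes MAP_MEM_NAMED_CREATE and MAP_MEM_PURGABLE from <mach/memory_object_types.h> and the VM_PURGABLE_* constants from <mach/vm_purgable.h>:

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/memory_object_types.h>
#include <mach/vm_purgable.h>

/* Create a purgeable named entry and mark it volatile (illustrative only). */
static kern_return_t
make_volatile_entry(mach_vm_size_t size, mach_port_t *entry_out)
{
	memory_object_size_t entry_size = size;
	int state = VM_PURGABLE_VOLATILE;
	kern_return_t kr;

	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, 0,
	    MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE |
	    VM_PROT_READ | VM_PROT_WRITE,
	    entry_out, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	return mach_memory_entry_purgable_control(*entry_out,
	    VM_PURGABLE_SET_STATE, &state);
}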
+kern_return_t
+mach_memory_entry_access_tracking(
+       ipc_port_t      entry_port,
+       int             *access_tracking,
+       uint32_t        *access_tracking_reads,
+       uint32_t        *access_tracking_writes)
+{
+       return memory_entry_access_tracking_internal(entry_port,
+                  access_tracking,
+                  access_tracking_reads,
+                  access_tracking_writes);
+}
+
+kern_return_t
+memory_entry_access_tracking_internal(
+       ipc_port_t      entry_port,
+       int             *access_tracking,
+       uint32_t        *access_tracking_reads,
+       uint32_t        *access_tracking_writes)
+{
+       vm_named_entry_t        mem_entry;
+       vm_object_t             object;
+       kern_return_t           kr;
+
+       if (!IP_VALID(entry_port) ||
+           ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
+
+       named_entry_lock(mem_entry);
+
+       if (mem_entry->is_sub_map ||
+           mem_entry->is_copy) {
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       assert(mem_entry->is_object);
+       object = vm_named_entry_to_vm_object(mem_entry);
+       if (object == VM_OBJECT_NULL) {
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+#if VM_OBJECT_ACCESS_TRACKING
+       vm_object_access_tracking(object,
+           access_tracking,
+           access_tracking_reads,
+           access_tracking_writes);
+       kr = KERN_SUCCESS;
+#else /* VM_OBJECT_ACCESS_TRACKING */
+       (void) access_tracking;
+       (void) access_tracking_reads;
+       (void) access_tracking_writes;
+       kr = KERN_NOT_SUPPORTED;
+#endif /* VM_OBJECT_ACCESS_TRACKING */
+
+       named_entry_unlock(mem_entry);
+
+       return kr;
+}
+
+kern_return_t
+mach_memory_entry_ownership(
+       ipc_port_t      entry_port,
+       task_t          owner,
+       int             ledger_tag,
+       int             ledger_flags)
+{
+       task_t                  cur_task;
+       kern_return_t           kr;
+       vm_named_entry_t        mem_entry;
+       vm_object_t             object;
+
+       cur_task = current_task();
+       if (cur_task != kernel_task &&
+           (owner != cur_task ||
+           (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) ||
+           ledger_tag == VM_LEDGER_TAG_NETWORK)) {
+               /*
+                * An entitlement is required to:
+                * + tranfer memory ownership to someone else,
+                * + request that the memory not count against the footprint,
+                * + tag as "network" (since that implies "no footprint")
+                */
+               if (!cur_task->task_can_transfer_memory_ownership &&
+                   IOTaskHasEntitlement(cur_task,
+                   "com.apple.private.memory.ownership_transfer")) {
+                       cur_task->task_can_transfer_memory_ownership = TRUE;
+               }
+               if (!cur_task->task_can_transfer_memory_ownership) {
+                       return KERN_NO_ACCESS;
+               }
+       }
+
+       if (ledger_flags & ~VM_LEDGER_FLAGS) {
+               return KERN_INVALID_ARGUMENT;
+       }
+       if (ledger_tag <= 0 ||
+           ledger_tag > VM_LEDGER_TAG_MAX) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if (!IP_VALID(entry_port) ||
+           ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+               return KERN_INVALID_ARGUMENT;
+       }
+       mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
+
+       named_entry_lock(mem_entry);
+
+       if (mem_entry->is_sub_map ||
+           mem_entry->is_copy) {
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       assert(mem_entry->is_object);
+       object = vm_named_entry_to_vm_object(mem_entry);
+       if (object == VM_OBJECT_NULL) {
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       vm_object_lock(object);
+
+       /* check that named entry covers entire object ? */
+       if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
+               vm_object_unlock(object);
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       named_entry_unlock(mem_entry);
+
+       kr = vm_object_ownership_change(object,
+           ledger_tag,
+           owner,
+           ledger_flags,
+           FALSE);                             /* task_objq_locked */
+       vm_object_unlock(object);
+
+       return kr;
+}
+
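A hedged sketch of driving the new ownership interface from a sufficiently privileged caller; VM_LEDGER_TAG_DEFAULT is assumed to come from <mach/vm_statistics.h>, and the entitlement noted above applies:

#include <mach/mach.h>

/* Illustrative only: charge an existing named entry's pages to `owner`. */
static kern_return_t
attribute_entry_to(mach_port_t entry_port, task_t owner)
{
	return mach_memory_entry_ownership(entry_port, owner,
	    VM_LEDGER_TAG_DEFAULT, /* ledger_flags */ 0);
}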
 kern_return_t
 mach_memory_entry_get_page_counts(
-       ipc_port_t      entry_port,
-       unsigned int    *resident_page_count,
-       unsigned int    *dirty_page_count)
+       ipc_port_t      entry_port,
+       unsigned int    *resident_page_count,
+       unsigned int    *dirty_page_count)
 {
-       kern_return_t           kr;
-       vm_named_entry_t        mem_entry;
-       vm_object_t             object;
-       vm_object_offset_t      offset;
-       vm_object_size_t        size;
+       kern_return_t           kr;
+       vm_named_entry_t        mem_entry;
+       vm_object_t             object;
+       vm_object_offset_t      offset;
+       vm_object_size_t        size;
 
-       if (entry_port == IP_NULL ||
+       if (!IP_VALID(entry_port) ||
            ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
                return KERN_INVALID_ARGUMENT;
        }
 
-       mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
+       mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
 
        named_entry_lock(mem_entry);
 
        if (mem_entry->is_sub_map ||
-           mem_entry->is_pager ||
            mem_entry->is_copy) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        }
 
-       object = mem_entry->backing.object;
+       assert(mem_entry->is_object);
+       object = vm_named_entry_to_vm_object(mem_entry);
        if (object == VM_OBJECT_NULL) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
@@ -3154,6 +3699,8 @@ mach_memory_entry_get_page_counts(
 
        offset = mem_entry->offset;
        size = mem_entry->size;
+       size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
+       offset = vm_object_trunc_page(offset);
 
        named_entry_unlock(mem_entry);
 
@@ -3164,6 +3711,136 @@ mach_memory_entry_get_page_counts(
        return kr;
 }
 
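A hedged companion sketch for the page-count query updated above (assuming the MIG-generated mach_memory_entry_get_page_counts() prototype is available to user space):

#include <stdio.h>
#include <mach/mach.h>

/* Illustrative only: report how many of a named entry's pages are resident and dirty. */
static void
log_entry_page_counts(mach_port_t entry_port)
{
	unsigned int resident = 0, dirty = 0;

	if (mach_memory_entry_get_page_counts(entry_port,
	    &resident, &dirty) == KERN_SUCCESS) {
		printf("resident: %u  dirty: %u\n", resident, dirty);
	}
}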
+kern_return_t
+mach_memory_entry_phys_page_offset(
+       ipc_port_t              entry_port,
+       vm_object_offset_t      *offset_p)
+{
+       vm_named_entry_t        mem_entry;
+       vm_object_t             object;
+       vm_object_offset_t      offset;
+       vm_object_offset_t      data_offset;
+
+       if (!IP_VALID(entry_port) ||
+           ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       mem_entry = (vm_named_entry_t) ipc_kobject_get(entry_port);
+
+       named_entry_lock(mem_entry);
+
+       if (mem_entry->is_sub_map ||
+           mem_entry->is_copy) {
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       assert(mem_entry->is_object);
+       object = vm_named_entry_to_vm_object(mem_entry);
+       if (object == VM_OBJECT_NULL) {
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       offset = mem_entry->offset;
+       data_offset = mem_entry->data_offset;
+
+       named_entry_unlock(mem_entry);
+
+       *offset_p = offset - vm_object_trunc_page(offset) + data_offset;
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+mach_memory_entry_map_size(
+       ipc_port_t             entry_port,
+       vm_map_t               map,
+       memory_object_offset_t offset,
+       memory_object_offset_t size,
+       mach_vm_size_t         *map_size)
+{
+       vm_named_entry_t        mem_entry;
+       vm_object_t             object;
+       vm_object_offset_t      object_offset_start, object_offset_end;
+       vm_map_copy_t           copy_map, target_copy_map;
+       vm_map_offset_t         overmap_start, overmap_end, trimmed_start;
+       kern_return_t           kr;
+
+       if (!IP_VALID(entry_port) ||
+           ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       mem_entry = (vm_named_entry_t) ipc_kobject_get(entry_port);
+       named_entry_lock(mem_entry);
+
+       if (mem_entry->is_sub_map) {
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if (mem_entry->is_object) {
+               object = vm_named_entry_to_vm_object(mem_entry);
+               if (object == VM_OBJECT_NULL) {
+                       named_entry_unlock(mem_entry);
+                       return KERN_INVALID_ARGUMENT;
+               }
+
+               object_offset_start = mem_entry->offset;
+               object_offset_start += mem_entry->data_offset;
+               object_offset_start += offset;
+               object_offset_end = object_offset_start + size;
+               object_offset_start = vm_map_trunc_page(object_offset_start,
+                   VM_MAP_PAGE_MASK(map));
+               object_offset_end = vm_map_round_page(object_offset_end,
+                   VM_MAP_PAGE_MASK(map));
+
+               named_entry_unlock(mem_entry);
+
+               *map_size = object_offset_end - object_offset_start;
+               return KERN_SUCCESS;
+       }
+
+       if (!mem_entry->is_copy) {
+               panic("unsupported type of mem_entry %p\n", mem_entry);
+       }
+
+       assert(mem_entry->is_copy);
+       if (VM_MAP_COPY_PAGE_MASK(mem_entry->backing.copy) == VM_MAP_PAGE_MASK(map)) {
+               *map_size = vm_map_round_page(mem_entry->offset + mem_entry->data_offset + offset + size, VM_MAP_PAGE_MASK(map)) - vm_map_trunc_page(mem_entry->offset + mem_entry->data_offset + offset, VM_MAP_PAGE_MASK(map));
+               DEBUG4K_SHARE("map %p (%d) mem_entry %p offset 0x%llx + 0x%llx + 0x%llx size 0x%llx -> map_size 0x%llx\n", map, VM_MAP_PAGE_MASK(map), mem_entry, mem_entry->offset, mem_entry->data_offset, offset, size, *map_size);
+               named_entry_unlock(mem_entry);
+               return KERN_SUCCESS;
+       }
+
+       DEBUG4K_SHARE("mem_entry %p copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx\n", mem_entry, mem_entry->backing.copy, VM_MAP_COPY_PAGE_SHIFT(mem_entry->backing.copy), map, VM_MAP_PAGE_SHIFT(map), offset, size);
+       copy_map = mem_entry->backing.copy;
+       target_copy_map = VM_MAP_COPY_NULL;
+       DEBUG4K_ADJUST("adjusting...\n");
+       kr = vm_map_copy_adjust_to_target(copy_map,
+           mem_entry->data_offset + offset,
+           size,
+           map,
+           FALSE,
+           &target_copy_map,
+           &overmap_start,
+           &overmap_end,
+           &trimmed_start);
+       if (kr == KERN_SUCCESS) {
+               if (target_copy_map->size != copy_map->size) {
+                       DEBUG4K_ADJUST("copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx map_size 0x%llx -> 0x%llx\n", copy_map, VM_MAP_COPY_PAGE_SHIFT(copy_map), map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, (uint64_t)size, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start, (uint64_t)copy_map->size, (uint64_t)target_copy_map->size);
+               }
+               *map_size = target_copy_map->size;
+               if (target_copy_map != copy_map) {
+                       vm_map_copy_discard(target_copy_map);
+               }
+               target_copy_map = VM_MAP_COPY_NULL;
+       }
+       named_entry_unlock(mem_entry);
+       return kr;
+}
+
 /*
  * mach_memory_entry_port_release:
  *
@@ -3173,7 +3850,7 @@ mach_memory_entry_get_page_counts(
  */
 void
 mach_memory_entry_port_release(
-       ipc_port_t      port)
+       ipc_port_t      port)
 {
        assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
        ipc_port_release_send(port);
@@ -3193,36 +3870,45 @@ mach_memory_entry_port_release(
  */
 void
 mach_destroy_memory_entry(
-       ipc_port_t      port)
+       ipc_port_t      port)
 {
-       vm_named_entry_t        named_entry;
+       vm_named_entry_t        named_entry;
 #if MACH_ASSERT
        assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
 #endif /* MACH_ASSERT */
-       named_entry = (vm_named_entry_t)port->ip_kobject;
+       named_entry = (vm_named_entry_t) ip_get_kobject(port);
 
        named_entry_lock(named_entry);
        named_entry->ref_count -= 1;
 
-       if(named_entry->ref_count == 0) {
+       if (named_entry->ref_count == 0) {
                if (named_entry->is_sub_map) {
                        vm_map_deallocate(named_entry->backing.map);
-               } else if (named_entry->is_pager) {
-                       /* JMM - need to drop reference on pager in that case */
                } else if (named_entry->is_copy) {
                        vm_map_copy_discard(named_entry->backing.copy);
+               } else if (named_entry->is_object) {
+                       assert(named_entry->backing.copy->cpy_hdr.nentries == 1);
+                       vm_map_copy_discard(named_entry->backing.copy);
                } else {
-                       /* release the VM object we've been pointing to */
-                       vm_object_deallocate(named_entry->backing.object);
+                       assert(named_entry->backing.copy == VM_MAP_COPY_NULL);
                }
 
                named_entry_unlock(named_entry);
                named_entry_lock_destroy(named_entry);
 
-               kfree((void *) port->ip_kobject,
-                     sizeof (struct vm_named_entry));
-       } else
+#if VM_NAMED_ENTRY_LIST
+               lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
+               queue_remove(&vm_named_entry_list, named_entry,
+                   vm_named_entry_t, named_entry_list);
+               assert(vm_named_entry_count > 0);
+               vm_named_entry_count--;
+               lck_mtx_unlock(&vm_named_entry_list_lock_data);
+#endif /* VM_NAMED_ENTRY_LIST */
+
+               kfree(named_entry, sizeof(struct vm_named_entry));
+       } else {
                named_entry_unlock(named_entry);
                named_entry_unlock(named_entry);
+       }
 }
 
 /* Allow manipulation of individual page state.  This is actually part of */
 }
 
 /* Allow manipulation of individual page state.  This is actually part of */
@@ -3230,33 +3916,33 @@ mach_destroy_memory_entry(
 
 kern_return_t
 mach_memory_entry_page_op(
 
 kern_return_t
 mach_memory_entry_page_op(
-       ipc_port_t              entry_port,
-       vm_object_offset_t      offset,
-       int                     ops,
-       ppnum_t                 *phys_entry,
-       int                     *flags)
+       ipc_port_t              entry_port,
+       vm_object_offset_t      offset,
+       int                     ops,
+       ppnum_t                 *phys_entry,
+       int                     *flags)
 {
 {
-       vm_named_entry_t        mem_entry;
-       vm_object_t             object;
-       kern_return_t           kr;
+       vm_named_entry_t        mem_entry;
+       vm_object_t             object;
+       kern_return_t           kr;
 
 
-       if (entry_port == IP_NULL ||
+       if (!IP_VALID(entry_port) ||
            ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
                return KERN_INVALID_ARGUMENT;
        }
 
            ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
                return KERN_INVALID_ARGUMENT;
        }
 
-       mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
+       mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
 
        named_entry_lock(mem_entry);
 
        if (mem_entry->is_sub_map ||
 
        named_entry_lock(mem_entry);
 
        if (mem_entry->is_sub_map ||
-           mem_entry->is_pager ||
            mem_entry->is_copy) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        }
 
            mem_entry->is_copy) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        }
 
-       object = mem_entry->backing.object;
+       assert(mem_entry->is_object);
+       object = vm_named_entry_to_vm_object(mem_entry);
        if (object == VM_OBJECT_NULL) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        if (object == VM_OBJECT_NULL) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
@@ -3267,50 +3953,50 @@ mach_memory_entry_page_op(
 
        kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
 
 
        kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
 
-       vm_object_deallocate(object);   
+       vm_object_deallocate(object);
 
        return kr;
 }
 
 /*
 
        return kr;
 }
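[Editor's note] A minimal kernel-side sketch (hypothetical caller; UPL_POP_PHYSICAL is assumed from memory_object_types.h) of the single-page interface above: query only the physical page number behind one byte offset of a named entry, without changing any page state. Note that the underlying vm_object_page_op() honors this op only for physically contiguous objects.

static kern_return_t
entry_phys_page(ipc_port_t entry_port, vm_object_offset_t offset,
    ppnum_t *ppnum)
{
	int flags = 0;

	/* Query-only: returns the physical page number, no state changed. */
	return mach_memory_entry_page_op(entry_port, offset,
	    UPL_POP_PHYSICAL, ppnum, &flags);
}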
 
 /*
- * mach_memory_entry_range_op offers performance enhancement over 
- * mach_memory_entry_page_op for page_op functions which do not require page 
- * level state to be returned from the call.  Page_op was created to provide 
- * a low-cost alternative to page manipulation via UPLs when only a single 
- * page was involved.  The range_op call establishes the ability in the _op 
+ * mach_memory_entry_range_op offers performance enhancement over
+ * mach_memory_entry_page_op for page_op functions which do not require page
+ * level state to be returned from the call.  Page_op was created to provide
+ * a low-cost alternative to page manipulation via UPLs when only a single
+ * page was involved.  The range_op call establishes the ability in the _op
  * family of functions to work on multiple pages where the lack of page level
  * state handling allows the caller to avoid the overhead of the upl structures.
  */
 
 kern_return_t
 mach_memory_entry_range_op(
  * family of functions to work on multiple pages where the lack of page level
  * state handling allows the caller to avoid the overhead of the upl structures.
  */
 
 kern_return_t
 mach_memory_entry_range_op(
-       ipc_port_t              entry_port,
-       vm_object_offset_t      offset_beg,
-       vm_object_offset_t      offset_end,
+       ipc_port_t              entry_port,
+       vm_object_offset_t      offset_beg,
+       vm_object_offset_t      offset_end,
        int                     ops,
        int                     *range)
 {
        int                     ops,
        int                     *range)
 {
-       vm_named_entry_t        mem_entry;
-       vm_object_t             object;
-       kern_return_t           kr;
+       vm_named_entry_t        mem_entry;
+       vm_object_t             object;
+       kern_return_t           kr;
 
 
-       if (entry_port == IP_NULL ||
+       if (!IP_VALID(entry_port) ||
            ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
                return KERN_INVALID_ARGUMENT;
        }
 
            ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
                return KERN_INVALID_ARGUMENT;
        }
 
-       mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
+       mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
 
        named_entry_lock(mem_entry);
 
        if (mem_entry->is_sub_map ||
 
        named_entry_lock(mem_entry);
 
        if (mem_entry->is_sub_map ||
-           mem_entry->is_pager ||
            mem_entry->is_copy) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        }
 
            mem_entry->is_copy) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        }
 
-       object = mem_entry->backing.object;
+       assert(mem_entry->is_object);
+       object = vm_named_entry_to_vm_object(mem_entry);
        if (object == VM_OBJECT_NULL) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        if (object == VM_OBJECT_NULL) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
@@ -3320,59 +4006,16 @@ mach_memory_entry_range_op(
        named_entry_unlock(mem_entry);
 
        kr = vm_object_range_op(object,
        named_entry_unlock(mem_entry);
 
        kr = vm_object_range_op(object,
-                               offset_beg,
-                               offset_end,
-                               ops,
-                               (uint32_t *) range);
+           offset_beg,
+           offset_end,
+           ops,
+           (uint32_t *) range);
 
        vm_object_deallocate(object);
 
        return kr;
 }
 
 
        vm_object_deallocate(object);
 
        return kr;
 }
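[Editor's note] A minimal sketch (hypothetical kernel-side caller; UPL_ROP_DUMP is assumed from memory_object_types.h) of the range interface documented above: discard every resident page backing a span of a named entry with one call, avoiding both the per-page loop over mach_memory_entry_page_op() and the overhead of building a UPL.

static kern_return_t
dump_entry_range(ipc_port_t entry_port, vm_object_offset_t start,
    vm_object_offset_t end)
{
	int range = 0;  /* out parameter; its meaning depends on the op */

	/*
	 * UPL_ROP_DUMP tears down resident pages in [start, end) without
	 * returning per-page state to the caller.
	 */
	return mach_memory_entry_range_op(entry_port, start, end,
	    UPL_ROP_DUMP, &range);
}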
 
-static void dp_control_port_init(void)
-{
-       lck_grp_init(&dynamic_pager_control_port_lock_group,"dp_control_port", LCK_GRP_ATTR_NULL);
-       lck_mtx_init(&dynamic_pager_control_port_lock, &dynamic_pager_control_port_lock_group, LCK_ATTR_NULL);
-}
-
-kern_return_t
-set_dp_control_port(
-       host_priv_t     host_priv,
-       ipc_port_t      control_port)   
-{
-       ipc_port_t old_port;
-
-       if (host_priv == HOST_PRIV_NULL)
-                return (KERN_INVALID_HOST);
-
-       lck_mtx_lock(&dynamic_pager_control_port_lock);
-       old_port = dynamic_pager_control_port;
-       dynamic_pager_control_port = control_port;
-       lck_mtx_unlock(&dynamic_pager_control_port_lock);
-
-       if (IP_VALID(old_port))
-               ipc_port_release_send(old_port);
-
-       return KERN_SUCCESS;
-}
-
-kern_return_t
-get_dp_control_port(
-       host_priv_t     host_priv,
-       ipc_port_t      *control_port)  
-{
-       if (host_priv == HOST_PRIV_NULL)
-                return (KERN_INVALID_HOST);
-
-       lck_mtx_lock(&dynamic_pager_control_port_lock);
-       *control_port = ipc_port_copy_send(dynamic_pager_control_port);
-       lck_mtx_unlock(&dynamic_pager_control_port_lock);
-
-       return KERN_SUCCESS;
-       
-}
-
 /* ******* Temporary Internal calls to UPL for BSD ***** */
 
 extern int kernel_upl_map(
 /* ******* Temporary Internal calls to UPL for BSD ***** */
 
 extern int kernel_upl_map(
@@ -3387,15 +4030,15 @@ extern int kernel_upl_unmap(
 extern int kernel_upl_commit(
        upl_t                   upl,
        upl_page_info_t         *pl,
 extern int kernel_upl_commit(
        upl_t                   upl,
        upl_page_info_t         *pl,
-       mach_msg_type_number_t   count);
+       mach_msg_type_number_t   count);
 
 extern int kernel_upl_commit_range(
        upl_t                   upl,
        upl_offset_t             offset,
 
 extern int kernel_upl_commit_range(
        upl_t                   upl,
        upl_offset_t             offset,
-       upl_size_t              size,
-       int                     flags,
-       upl_page_info_array_t   pl,
-       mach_msg_type_number_t  count);
+       upl_size_t              size,
+       int                     flags,
+       upl_page_info_array_t   pl,
+       mach_msg_type_number_t  count);
 
 extern int kernel_upl_abort(
        upl_t                   upl,
 
 extern int kernel_upl_abort(
        upl_t                   upl,
@@ -3410,9 +4053,9 @@ extern int kernel_upl_abort_range(
 
 kern_return_t
 kernel_upl_map(
 
 kern_return_t
 kernel_upl_map(
-       vm_map_t        map,
-       upl_t           upl,
-       vm_offset_t     *dst_addr)
+       vm_map_t        map,
+       upl_t           upl,
+       vm_offset_t     *dst_addr)
 {
        return vm_upl_map(map, upl, dst_addr);
 }
 {
        return vm_upl_map(map, upl, dst_addr);
 }
@@ -3420,8 +4063,8 @@ kernel_upl_map(
 
 kern_return_t
 kernel_upl_unmap(
 
 kern_return_t
 kernel_upl_unmap(
-       vm_map_t        map,
-       upl_t           upl)
+       vm_map_t        map,
+       upl_t           upl)
 {
        return vm_upl_unmap(map, upl);
 }
 {
        return vm_upl_unmap(map, upl);
 }
@@ -3432,7 +4075,7 @@ kernel_upl_commit(
        upl_page_info_t        *pl,
        mach_msg_type_number_t  count)
 {
        upl_page_info_t        *pl,
        mach_msg_type_number_t  count)
 {
-       kern_return_t   kr;
+       kern_return_t   kr;
 
        kr = upl_commit(upl, pl, count);
        upl_deallocate(upl);
 
        kr = upl_commit(upl, pl, count);
        upl_deallocate(upl);
@@ -3442,18 +4085,19 @@ kernel_upl_commit(
 
 kern_return_t
 kernel_upl_commit_range(
 
 kern_return_t
 kernel_upl_commit_range(
-       upl_t                   upl,
-       upl_offset_t            offset,
-       upl_size_t              size,
-       int                     flags,
+       upl_t                   upl,
+       upl_offset_t            offset,
+       upl_size_t              size,
+       int                     flags,
        upl_page_info_array_t   pl,
        mach_msg_type_number_t  count)
 {
        upl_page_info_array_t   pl,
        mach_msg_type_number_t  count)
 {
-       boolean_t               finished = FALSE;
-       kern_return_t           kr;
+       boolean_t               finished = FALSE;
+       kern_return_t           kr;
 
 
-       if (flags & UPL_COMMIT_FREE_ON_EMPTY)
+       if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
                flags |= UPL_COMMIT_NOTIFY_EMPTY;
                flags |= UPL_COMMIT_NOTIFY_EMPTY;
+       }
 
        if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
                return KERN_INVALID_ARGUMENT;
 
        if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
                return KERN_INVALID_ARGUMENT;
@@ -3461,39 +4105,42 @@ kernel_upl_commit_range(
 
        kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
 
 
        kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
 
-       if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
+       if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) {
                upl_deallocate(upl);
                upl_deallocate(upl);
+       }
 
        return kr;
 }
 
        return kr;
 }
-       
+
 kern_return_t
 kernel_upl_abort_range(
 kern_return_t
 kernel_upl_abort_range(
-       upl_t                   upl,
-       upl_offset_t            offset,
-       upl_size_t              size,
-       int                     abort_flags)
+       upl_t                   upl,
+       upl_offset_t            offset,
+       upl_size_t              size,
+       int                     abort_flags)
 {
 {
-       kern_return_t           kr;
-       boolean_t               finished = FALSE;
+       kern_return_t           kr;
+       boolean_t               finished = FALSE;
 
 
-       if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
+       if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) {
                abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
                abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
+       }
 
        kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
 
 
        kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
 
-       if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
+       if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) {
                upl_deallocate(upl);
                upl_deallocate(upl);
+       }
 
        return kr;
 }
 
 kern_return_t
 kernel_upl_abort(
 
        return kr;
 }
 
 kern_return_t
 kernel_upl_abort(
-       upl_t                   upl,
-       int                     abort_type)
+       upl_t                   upl,
+       int                     abort_type)
 {
 {
-       kern_return_t   kr;
+       kern_return_t   kr;
 
        kr = upl_abort(upl, abort_type);
        upl_deallocate(upl);
 
        kr = upl_abort(upl, abort_type);
        upl_deallocate(upl);
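[Editor's note] A minimal sketch (hypothetical BSD-side caller, not part of this diff) of the free-on-empty convention these wrappers implement: kernel_upl_commit_range() above promotes UPL_COMMIT_FREE_ON_EMPTY to UPL_COMMIT_NOTIFY_EMPTY and calls upl_deallocate() once the commit reports it finished, so the caller must not touch the UPL again after a successful full commit.

static kern_return_t
commit_and_release(upl_t upl, upl_offset_t offset, upl_size_t size,
    upl_page_info_array_t pl, mach_msg_type_number_t count)
{
	/* On a finishing commit the wrapper deallocates the UPL for us. */
	return kernel_upl_commit_range(upl, offset, size,
	    UPL_COMMIT_FREE_ON_EMPTY, pl, count);
}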
@@ -3508,15 +4155,15 @@ kernel_upl_abort(
 
 kern_return_t
 vm_region_object_create(
 
 kern_return_t
 vm_region_object_create(
-       __unused vm_map_t       target_map,
-       vm_size_t               size,
-       ipc_port_t              *object_handle)
+       __unused vm_map_t       target_map,
+       vm_size_t               size,
+       ipc_port_t              *object_handle)
 {
 {
-       vm_named_entry_t        user_entry;
-       ipc_port_t              user_handle;
+       vm_named_entry_t        user_entry;
+       ipc_port_t              user_handle;
+
+       vm_map_t        new_map;
 
 
-       vm_map_t        new_map;
-       
        if (mach_memory_entry_allocate(&user_entry, &user_handle)
            != KERN_SUCCESS) {
                return KERN_FAILURE;
        if (mach_memory_entry_allocate(&user_entry, &user_handle)
            != KERN_SUCCESS) {
                return KERN_FAILURE;
@@ -3525,9 +4172,9 @@ vm_region_object_create(
        /* Create a named object based on a submap of specified size */
 
        new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
        /* Create a named object based on a submap of specified size */
 
        new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
-                               vm_map_round_page(size,
-                                                 VM_MAP_PAGE_MASK(target_map)),
-                               TRUE);
+           vm_map_round_page(size,
+           VM_MAP_PAGE_MASK(target_map)),
+           TRUE);
        vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));
 
        user_entry->backing.map = new_map;
        vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));
 
        user_entry->backing.map = new_map;
@@ -3540,40 +4187,38 @@ vm_region_object_create(
 
        *object_handle = user_handle;
        return KERN_SUCCESS;
 
        *object_handle = user_handle;
        return KERN_SUCCESS;
-
 }
 
 }
 
-ppnum_t vm_map_get_phys_page(          /* forward */
-       vm_map_t        map,
-       vm_offset_t     offset);
+ppnum_t vm_map_get_phys_page(           /* forward */
+       vm_map_t        map,
+       vm_offset_t     offset);
 
 ppnum_t
 vm_map_get_phys_page(
 
 ppnum_t
 vm_map_get_phys_page(
-       vm_map_t                map,
-       vm_offset_t             addr)
+       vm_map_t                map,
+       vm_offset_t             addr)
 {
 {
-       vm_object_offset_t      offset;
-       vm_object_t             object;
-       vm_map_offset_t         map_offset;
-       vm_map_entry_t          entry;
-       ppnum_t                 phys_page = 0;
+       vm_object_offset_t      offset;
+       vm_object_t             object;
+       vm_map_offset_t         map_offset;
+       vm_map_entry_t          entry;
+       ppnum_t                 phys_page = 0;
 
        map_offset = vm_map_trunc_page(addr, PAGE_MASK);
 
        vm_map_lock(map);
        while (vm_map_lookup_entry(map, map_offset, &entry)) {
 
        map_offset = vm_map_trunc_page(addr, PAGE_MASK);
 
        vm_map_lock(map);
        while (vm_map_lookup_entry(map, map_offset, &entry)) {
-
                if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
                        vm_map_unlock(map);
                        return (ppnum_t) 0;
                }
                if (entry->is_sub_map) {
                if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
                        vm_map_unlock(map);
                        return (ppnum_t) 0;
                }
                if (entry->is_sub_map) {
-                       vm_map_t        old_map;
+                       vm_map_t        old_map;
                        vm_map_lock(VME_SUBMAP(entry));
                        old_map = map;
                        map = VME_SUBMAP(entry);
                        map_offset = (VME_OFFSET(entry) +
                        vm_map_lock(VME_SUBMAP(entry));
                        old_map = map;
                        map = VME_SUBMAP(entry);
                        map_offset = (VME_OFFSET(entry) +
-                                     (map_offset - entry->vme_start));
+                           (map_offset - entry->vme_start));
                        vm_map_unlock(old_map);
                        continue;
                }
                        vm_map_unlock(old_map);
                        continue;
                }
@@ -3585,26 +4230,26 @@ vm_map_get_phys_page(
                        if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
                                /* need to call vm_fault */
                                vm_map_unlock(map);
                        if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
                                /* need to call vm_fault */
                                vm_map_unlock(map);
-                               vm_fault(map, map_offset, VM_PROT_NONE, 
-                                       FALSE, THREAD_UNINT, NULL, 0);
+                               vm_fault(map, map_offset, VM_PROT_NONE,
+                                   FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
+                                   THREAD_UNINT, NULL, 0);
                                vm_map_lock(map);
                                continue;
                        }
                        offset = (VME_OFFSET(entry) +
                                vm_map_lock(map);
                                continue;
                        }
                        offset = (VME_OFFSET(entry) +
-                                 (map_offset - entry->vme_start));
+                           (map_offset - entry->vme_start));
                        phys_page = (ppnum_t)
                        phys_page = (ppnum_t)
-                               ((VME_OBJECT(entry)->vo_shadow_offset 
-                                 + offset) >> PAGE_SHIFT);
+                           ((VME_OBJECT(entry)->vo_shadow_offset
+                           + offset) >> PAGE_SHIFT);
                        break;
                        break;
-                       
                }
                offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
                object = VME_OBJECT(entry);
                vm_object_lock(object);
                while (TRUE) {
                }
                offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
                object = VME_OBJECT(entry);
                vm_object_lock(object);
                while (TRUE) {
-                       vm_page_t dst_page = vm_page_lookup(object,offset);
-                       if(dst_page == VM_PAGE_NULL) {
-                               if(object->shadow) {
+                       vm_page_t dst_page = vm_page_lookup(object, offset);
+                       if (dst_page == VM_PAGE_NULL) {
+                               if (object->shadow) {
                                        vm_object_t old_object;
                                        vm_object_lock(object->shadow);
                                        old_object = object;
                                        vm_object_t old_object;
                                        vm_object_lock(object->shadow);
                                        old_object = object;
@@ -3622,43 +4267,36 @@ vm_map_get_phys_page(
                        }
                }
                break;
                        }
                }
                break;
-
-       } 
+       }
 
        vm_map_unlock(map);
        return phys_page;
 }
 
 
        vm_map_unlock(map);
        return phys_page;
 }
 
-void
-vm_user_init(void)
-{
-       dp_control_port_init();
-}
-
 #if 0
 #if 0
-kern_return_t kernel_object_iopl_request(      /* forward */
-       vm_named_entry_t        named_entry,
-       memory_object_offset_t  offset,
-       upl_size_t              *upl_size,
-       upl_t                   *upl_ptr,
-       upl_page_info_array_t   user_page_list,
-       unsigned int            *page_list_count,
-       int                     *flags);
+kern_return_t kernel_object_iopl_request(       /* forward */
+       vm_named_entry_t        named_entry,
+       memory_object_offset_t  offset,
+       upl_size_t              *upl_size,
+       upl_t                   *upl_ptr,
+       upl_page_info_array_t   user_page_list,
+       unsigned int            *page_list_count,
+       int                     *flags);
 
 kern_return_t
 kernel_object_iopl_request(
 
 kern_return_t
 kernel_object_iopl_request(
-       vm_named_entry_t        named_entry,
-       memory_object_offset_t  offset,
-       upl_size_t              *upl_size,
-       upl_t                   *upl_ptr,
-       upl_page_info_array_t   user_page_list,
-       unsigned int            *page_list_count,
-       int                     *flags)
+       vm_named_entry_t        named_entry,
+       memory_object_offset_t  offset,
+       upl_size_t              *upl_size,
+       upl_t                   *upl_ptr,
+       upl_page_info_array_t   user_page_list,
+       unsigned int            *page_list_count,
+       int                     *flags)
 {
 {
-       vm_object_t             object;
-       kern_return_t           ret;
+       vm_object_t             object;
+       kern_return_t           ret;
 
 
-       int                     caller_flags;
+       int                     caller_flags;
 
        caller_flags = *flags;
 
 
        caller_flags = *flags;
 
@@ -3671,84 +4309,57 @@ kernel_object_iopl_request(
        }
 
        /* a few checks to make sure user is obeying rules */
        }
 
        /* a few checks to make sure user is obeying rules */
-       if(*upl_size == 0) {
-               if(offset >= named_entry->size)
-                       return(KERN_INVALID_RIGHT);
+       if (*upl_size == 0) {
+               if (offset >= named_entry->size) {
+                       return KERN_INVALID_RIGHT;
+               }
                *upl_size = (upl_size_t) (named_entry->size - offset);
                *upl_size = (upl_size_t) (named_entry->size - offset);
-               if (*upl_size != named_entry->size - offset)
+               if (*upl_size != named_entry->size - offset) {
                        return KERN_INVALID_ARGUMENT;
                        return KERN_INVALID_ARGUMENT;
+               }
        }
        }
-       if(caller_flags & UPL_COPYOUT_FROM) {
-               if((named_entry->protection & VM_PROT_READ) 
-                                       != VM_PROT_READ) {
-                       return(KERN_INVALID_RIGHT);
+       if (caller_flags & UPL_COPYOUT_FROM) {
+               if ((named_entry->protection & VM_PROT_READ)
+                   != VM_PROT_READ) {
+                       return KERN_INVALID_RIGHT;
                }
        } else {
                }
        } else {
-               if((named_entry->protection & 
-                       (VM_PROT_READ | VM_PROT_WRITE)) 
-                       != (VM_PROT_READ | VM_PROT_WRITE)) {
-                       return(KERN_INVALID_RIGHT);
+               if ((named_entry->protection &
+                   (VM_PROT_READ | VM_PROT_WRITE))
+                   != (VM_PROT_READ | VM_PROT_WRITE)) {
+                       return KERN_INVALID_RIGHT;
                }
        }
                }
        }
-       if(named_entry->size < (offset + *upl_size))
-               return(KERN_INVALID_ARGUMENT);
+       if (named_entry->size < (offset + *upl_size)) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
        /* the callers parameter offset is defined to be the */
        /* offset from beginning of named entry offset in object */
        offset = offset + named_entry->offset;
 
        if (named_entry->is_sub_map ||
 
        /* the callers parameter offset is defined to be the */
        /* offset from beginning of named entry offset in object */
        offset = offset + named_entry->offset;
 
        if (named_entry->is_sub_map ||
-           named_entry->is_copy)
+           named_entry->is_copy) {
                return KERN_INVALID_ARGUMENT;
                return KERN_INVALID_ARGUMENT;
-               
-       named_entry_lock(named_entry);
-
-       if (named_entry->is_pager) {
-               object = vm_object_enter(named_entry->backing.pager, 
-                               named_entry->offset + named_entry->size, 
-                               named_entry->internal, 
-                               FALSE,
-                               FALSE);
-               if (object == VM_OBJECT_NULL) {
-                       named_entry_unlock(named_entry);
-                       return(KERN_INVALID_OBJECT);
-               }
-
-               /* JMM - drop reference on the pager here? */
-
-               /* create an extra reference for the object */
-               vm_object_lock(object);
-               vm_object_reference_locked(object);
-               named_entry->backing.object = object;
-               named_entry->is_pager = FALSE;
-               named_entry_unlock(named_entry);
+       }
 
 
-               /* wait for object (if any) to be ready */
-               if (!named_entry->internal) {
-                       while (!object->pager_ready) {
-                               vm_object_wait(object,
-                                              VM_OBJECT_EVENT_PAGER_READY,
-                                              THREAD_UNINT);
-                               vm_object_lock(object);
-                       }
-               }
-               vm_object_unlock(object);
+       named_entry_lock(named_entry);
 
 
-       } else {
-               /* This is the case where we are going to operate */
-               /* an an already known object.  If the object is */
-               /* not ready it is internal.  An external     */
-               /* object cannot be mapped until it is ready  */
-               /* we can therefore avoid the ready check     */
-               /* in this case.  */
-               object = named_entry->backing.object;
-               vm_object_reference(object);
-               named_entry_unlock(named_entry);
-       }
+       /* This is the case where we are going to operate */
+       /* on an already known object.  If the object is */
+       /* not ready it is internal.  An external     */
+       /* object cannot be mapped until it is ready  */
+       /* we can therefore avoid the ready check     */
+       /* in this case.  */
+       assert(named_entry->is_object);
+       object = vm_named_entry_to_vm_object(named_entry);
+       vm_object_reference(object);
+       named_entry_unlock(named_entry);
 
        if (!object->private) {
 
        if (!object->private) {
-               if (*upl_size > MAX_UPL_TRANSFER_BYTES)
+               if (*upl_size > MAX_UPL_TRANSFER_BYTES) {
                        *upl_size = MAX_UPL_TRANSFER_BYTES;
                        *upl_size = MAX_UPL_TRANSFER_BYTES;
+               }
                if (object->phys_contiguous) {
                        *flags = UPL_PHYS_CONTIG;
                } else {
                if (object->phys_contiguous) {
                        *flags = UPL_PHYS_CONTIG;
                } else {
@@ -3759,13 +4370,123 @@ kernel_object_iopl_request(
        }
 
        ret = vm_object_iopl_request(object,
        }
 
        ret = vm_object_iopl_request(object,
-                                    offset,
-                                    *upl_size,
-                                    upl_ptr,
-                                    user_page_list,
-                                    page_list_count,
-                                    (upl_control_flags_t)(unsigned int)caller_flags);
+           offset,
+           *upl_size,
+           upl_ptr,
+           user_page_list,
+           page_list_count,
+           (upl_control_flags_t)(unsigned int)caller_flags);
        vm_object_deallocate(object);
        return ret;
 }
 #endif
        vm_object_deallocate(object);
        return ret;
 }
 #endif
+
+/*
+ * These symbols are looked up at runtime by vmware, VirtualBox,
+ * despite not being exported in the symbol sets.
+ */
+
+#if defined(__x86_64__)
+
+kern_return_t
+mach_vm_map(
+       vm_map_t                target_map,
+       mach_vm_offset_t        *address,
+       mach_vm_size_t  initial_size,
+       mach_vm_offset_t        mask,
+       int                     flags,
+       ipc_port_t              port,
+       vm_object_offset_t      offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance);
+
+kern_return_t
+mach_vm_remap(
+       vm_map_t                target_map,
+       mach_vm_offset_t        *address,
+       mach_vm_size_t  size,
+       mach_vm_offset_t        mask,
+       int                     flags,
+       vm_map_t                src_map,
+       mach_vm_offset_t        memory_address,
+       boolean_t               copy,
+       vm_prot_t               *cur_protection,
+       vm_prot_t               *max_protection,
+       vm_inherit_t            inheritance);
+
+kern_return_t
+mach_vm_map(
+       vm_map_t                target_map,
+       mach_vm_offset_t        *address,
+       mach_vm_size_t  initial_size,
+       mach_vm_offset_t        mask,
+       int                     flags,
+       ipc_port_t              port,
+       vm_object_offset_t      offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance)
+{
+       return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
+                  offset, copy, cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+mach_vm_remap(
+       vm_map_t                target_map,
+       mach_vm_offset_t        *address,
+       mach_vm_size_t  size,
+       mach_vm_offset_t        mask,
+       int                     flags,
+       vm_map_t                src_map,
+       mach_vm_offset_t        memory_address,
+       boolean_t               copy,
+       vm_prot_t               *cur_protection,   /* OUT */
+       vm_prot_t               *max_protection,   /* OUT */
+       vm_inherit_t            inheritance)
+{
+       return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
+                  copy, cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+vm_map(
+       vm_map_t                target_map,
+       vm_offset_t             *address,
+       vm_size_t               size,
+       vm_offset_t             mask,
+       int                     flags,
+       ipc_port_t              port,
+       vm_offset_t             offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance);
+
+kern_return_t
+vm_map(
+       vm_map_t                target_map,
+       vm_offset_t             *address,
+       vm_size_t               size,
+       vm_offset_t             mask,
+       int                     flags,
+       ipc_port_t              port,
+       vm_offset_t             offset,
+       boolean_t               copy,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance)
+{
+       vm_tag_t tag;
+
+       VM_GET_FLAGS_ALIAS(flags, tag);
+       return vm_map_kernel(target_map, address, size, mask,
+                  flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
+                  port, offset, copy,
+                  cur_protection, max_protection, inheritance);
+}
+
+#endif /* __x86_64__ */
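[Editor's note] A minimal sketch (hypothetical in-kernel caller on x86_64, assuming the kernel_map global and the standard VM flag and protection constants) of the re-exported mach_vm_map() wrapper above: an anonymous anywhere-allocation of the kind those third-party virtualization drivers perform once they have resolved the symbol at runtime.

static kern_return_t
alloc_anywhere(mach_vm_offset_t *addr, mach_vm_size_t size)
{
	*addr = 0;
	return mach_vm_map(kernel_map, addr, size,
	    0,                  /* mask: no alignment constraint */
	    VM_FLAGS_ANYWHERE,  /* let the VM choose the address */
	    IP_NULL,            /* no memory entry: anonymous memory */
	    0,                  /* offset */
	    FALSE,              /* copy */
	    VM_PROT_DEFAULT,    /* cur_protection */
	    VM_PROT_ALL,        /* max_protection */
	    VM_INHERIT_DEFAULT);
}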