diff --git a/osfmk/vm/vm_map.h b/osfmk/vm/vm_map.h
index 07ed5ad3aac148169f9471cb764a59907224f989..44cef715dae6faaf76c4fa061d21094f14b87aa7 100644
--- a/osfmk/vm/vm_map.h
+++ b/osfmk/vm/vm_map.h
@@ -91,10 +91,12 @@ extern vm_map_t current_map(void);
 
 /* Setup reserved areas in a new VM map */
 extern kern_return_t   vm_map_exec(
-                               vm_map_t                new_map,
-                               task_t                  task,
-                               void                    *fsroot,
-                               cpu_type_t              cpu);
+       vm_map_t                new_map,
+       task_t                  task,
+       boolean_t               is64bit,
+       void                    *fsroot,
+       cpu_type_t              cpu,
+       cpu_subtype_t           cpu_subtype);
 
 __END_DECLS
 
@@ -146,6 +148,9 @@ typedef union vm_map_object {
 #define named_entry_lock_destroy(object)       lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
 #define named_entry_lock(object)               lck_mtx_lock(&(object)->Lock)
 #define named_entry_unlock(object)             lck_mtx_unlock(&(object)->Lock)   
+#if VM_NAMED_ENTRY_LIST
+extern queue_head_t vm_named_entry_list;
+#endif /* VM_NAMED_ENTRY_LIST */
 
 /*
  *     Type:           vm_named_entry_t [internal use only]
@@ -169,7 +174,6 @@ struct vm_named_entry {
        decl_lck_mtx_data(,     Lock)           /* Synchronization */
        union {
                vm_object_t     object;         /* object I point to */
-               memory_object_t pager;          /* amo pager port */
                vm_map_t        map;            /* map backing submap */
                vm_map_copy_t   copy;           /* a VM map copy */
        } backing;
@@ -181,8 +185,14 @@ struct vm_named_entry {
        unsigned int                            /* Is backing.xxx : */
        /* boolean_t */         internal:1,     /* ... an internal object */
        /* boolean_t */         is_sub_map:1,   /* ... a submap? */
-       /* boolean_t */         is_pager:1,     /* ... a pager port */
        /* boolean_t */         is_copy:1;      /* ... a VM map copy */
+#if VM_NAMED_ENTRY_LIST
+       queue_chain_t           named_entry_list;
+       int                     named_entry_alias;
+       mach_port_t             named_entry_port;
+#define NAMED_ENTRY_BT_DEPTH 16
+       void                    *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
+#endif /* VM_NAMED_ENTRY_LIST */
 };
 
 /*
@@ -239,6 +249,7 @@ struct vm_map_links {
        vm_object_shadow(&__object, &__offset, (length));       \
        if (__object != VME_OBJECT((entry))) {                  \
                VME_OBJECT_SET((entry), __object);              \
+               (entry)->use_pmap = TRUE;                       \
        }                                                       \
        if (__offset != VME_OFFSET((entry))) {                  \
                VME_OFFSET_SET((entry), __offset);              \
@@ -254,6 +265,35 @@ struct vm_map_links {
        (entry)->vme_offset = __offset | ((alias) & VME_ALIAS_MASK);    \
        MACRO_END
 
+/*
+ * FOOTPRINT ACCOUNTING:
+ * The "memory footprint" is better described in the pmap layer.
+ *
+ * At the VM level, these 2 vm_map_entry_t fields are relevant:
+ * iokit_mapped:
+ *     For an "iokit_mapped" entry, we add the size of the entry to the
+ *     footprint when the entry is entered into the map and we subtract that
+ *     size when the entry is removed.  No other accounting should take place.
+ *     "use_pmap" should be FALSE but is not taken into account.
+ * use_pmap: (only when is_sub_map is FALSE)
+ *     This indicates if we should ask the pmap layer to account for pages
+ *     in this mapping.  If FALSE, we expect that another form of accounting
+ *     is being used (e.g. "iokit_mapped" or the explicit accounting of
+ *     non-volatile purgable memory).
+ *
+ * So the logic is mostly:
+ * if entry->is_sub_map == TRUE
+ *     anything in a submap does not count for the footprint
+ * else if entry->iokit_mapped == TRUE
+ *     footprint includes the entire virtual size of this entry
+ * else if entry->use_pmap == FALSE
+ *     tell pmap NOT to account for pages being pmap_enter()'d from this
+ *     mapping (i.e. use "alternate accounting")
+ * else
+ *     pmap will account for pages being pmap_enter()'d from this mapping
+ *     as it sees fit (only if anonymous, etc...)
+ */
+
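The decision described in the comment above boils down to a small predicate. The helper below is purely illustrative (it is not declared in this header) and uses the field names as spelled in the FOOTPRINT ACCOUNTING comment:

/*
 * Illustrative sketch only -- not part of this header.
 * Field names follow the FOOTPRINT ACCOUNTING comment above.
 */
static inline boolean_t
vm_map_entry_uses_pmap_accounting(vm_map_entry_t entry)
{
	if (entry->is_sub_map) {
		return FALSE;	/* submap contents never count for the footprint */
	}
	if (entry->iokit_mapped) {
		return FALSE;	/* entire virtual size charged at map entry/removal */
	}
	if (!entry->use_pmap) {
		return FALSE;	/* "alternate accounting" covers these pages */
	}
	return TRUE;		/* pmap charges pages as they are pmap_enter()'d */
}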
 struct vm_map_entry {
        struct vm_map_links     links;          /* links to other entries */
 #define vme_prev               links.prev
@@ -294,6 +334,7 @@ struct vm_map_entry {
                                             * this entry it is being deleted
                                             * without unwiring them */
        /* boolean_t */ used_for_jit:1,
+       /* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */
        /* boolean_t */ from_reserved_zone:1, /* Allocated from
                                               * kernel reserved zone    */
 
@@ -301,7 +342,8 @@ struct vm_map_entry {
        /* boolean_t */ iokit_acct:1,
        /* boolean_t */ vme_resilient_codesign:1,
        /* boolean_t */ vme_resilient_media:1,
-               __unused:6;
+       /* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */
+               __unused:4;
 ;
 
        unsigned short          wired_count;    /* can be paged if = 0 */
@@ -349,7 +391,6 @@ struct vm_map_header {
        int                     nentries;       /* Number of entries */
        boolean_t               entries_pageable;
                                                /* are map entries pageable? */
-       vm_map_offset_t         highest_entry_end_addr; /* The ending address of the highest allocated vm_entry_t */
 #ifdef VM_MAP_STORE_USE_RB
        struct rb_head  rb_head_store;
 #endif
@@ -376,32 +417,58 @@ struct vm_map_header {
  *             quickly find free space.
  */
 struct _vm_map {
-       lck_rw_t                        lock;           /* map lock */
+       lck_rw_t                lock;           /* map lock */
        struct vm_map_header    hdr;            /* Map entry header */
 #define min_offset             hdr.links.start /* start of range */
 #define max_offset             hdr.links.end   /* end of range */
-#define highest_entry_end      hdr.highest_entry_end_addr
        pmap_t                  pmap;           /* Physical map */
        vm_map_size_t           size;           /* virtual size */
        vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
        vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
-       int                     ref_count;      /* Reference count */
-#if    TASK_SWAPPER
-       int                     res_count;      /* Residence count (swap) */
-       int                     sw_state;       /* Swap state */
-#endif /* TASK_SWAPPER */
+#if __x86_64__
+       vm_map_offset_t         vmmap_high_start;
+#endif /* __x86_64__ */
+
+       union {
+               /*
+                * If map->disable_vmentry_reuse == TRUE:
+                * the end address of the highest allocated vm_map_entry_t.
+                */
+               vm_map_offset_t         vmu1_highest_entry_end;
+               /*
+                * For a nested VM map:
+                * the lowest address in this nested VM map that we would
+                * expect to be unnested under normal operation (i.e. for
+                * regular copy-on-write on DATA section).
+                */
+               vm_map_offset_t         vmu1_lowest_unnestable_start;
+       } vmu1;
+#define highest_entry_end      vmu1.vmu1_highest_entry_end
+#define lowest_unnestable_start        vmu1.vmu1_lowest_unnestable_start
        decl_lck_mtx_data(,     s_lock)         /* Lock ref, res fields */
        lck_mtx_ext_t           s_lock_ext;
        vm_map_entry_t          hint;           /* hint for quick lookups */
-       struct vm_map_links*    hole_hint;      /* hint for quick hole lookups */
+       union {
+               struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
+               struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
+       } vmmap_u_1;
+#define hole_hint vmmap_u_1.vmmap_hole_hint
+#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
        union{
                vm_map_entry_t          _first_free;    /* First free space hint */
                struct vm_map_links*    _holes;         /* links all holes between entries */
-       }f_s;                                           /* Union for free space data structures being used */
+       } f_s;                                          /* Union for free space data structures being used */
 
 #define first_free             f_s._first_free
 #define holes_list             f_s._holes
 
+       int                     map_refcnt;     /* Reference count */
+
+#if    TASK_SWAPPER
+       int                     res_count;      /* Residence count (swap) */
+       int                     sw_state;       /* Swap state */
+#endif /* TASK_SWAPPER */
+
        unsigned int            
        /* boolean_t */         wait_for_space:1, /* Should callers wait for space? */
        /* boolean_t */         wiring_required:1, /* All memory wired? */
@@ -411,16 +478,17 @@ struct _vm_map {
        /* boolean_t */         disable_vmentry_reuse:1, /*  All vm entries should keep using newer and higher addresses in the map */ 
        /* boolean_t */         map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
        /* boolean_t */         holelistenabled:1,
-       /* reserved */          pad:24;
+       /* boolean_t */         is_nested_map:1,
+       /* boolean_t */         map_disallow_new_exec:1, /* Disallow new executable code */
+       /* boolean_t */         jit_entry_exists:1,
+       /* boolean_t */         has_corpse_footprint:1,
+       /* boolean_t */         warned_delete_gap:1,
+       /* reserved */          pad:19;
        unsigned int            timestamp;      /* Version number */
-       unsigned int            color_rr;       /* next color (not protected by a lock) */
-#if CONFIG_FREEZE
-       void                    *default_freezer_handle;
-#endif
-       boolean_t               jit_entry_exists;
-} ;
+};
 
-#define vm_map_to_entry(map)   ((struct vm_map_entry *) &(map)->hdr.links)
+#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
+#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
 #define vm_map_first_entry(map)        ((map)->hdr.links.next)
 #define vm_map_last_entry(map) ((map)->hdr.links.prev)
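CAST_TO_VM_MAP_ENTRY() lets the map's own links field act as a sentinel node, so the circular entry list can be walked with the map itself as the end marker. A sketch of the traversal pattern used throughout vm_map.c:

vm_map_entry_t entry;

for (entry = vm_map_first_entry(map);
     entry != vm_map_to_entry(map);
     entry = entry->vme_next) {
	/* ... examine "entry" with the map lock held ... */
}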
 
@@ -514,8 +582,7 @@ struct vm_map_copy {
  *     Useful macros for entry list copy objects
  */
 
-#define vm_map_copy_to_entry(copy)             \
-               ((struct vm_map_entry *) &(copy)->cpy_hdr.links)
+#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
 #define vm_map_copy_first_entry(copy)          \
                ((copy)->cpy_hdr.links.next)
 #define vm_map_copy_last_entry(copy)           \
@@ -545,6 +612,9 @@ struct vm_map_copy {
  */
 #define vm_map_lock_read_to_write(map) (lck_rw_lock_shared_to_exclusive(&(map)->lock) != TRUE)
 
+#define vm_map_try_lock(map)           lck_rw_try_lock_exclusive(&(map)->lock)
+#define vm_map_try_lock_read(map)      lck_rw_try_lock_shared(&(map)->lock)
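vm_map_try_lock()/vm_map_try_lock_read() wrap the non-blocking lck_rw try operations and return a boolean. A caller that must not sleep could use them as below (sketch):

/* Sketch: non-blocking, read-only peek at the map. */
if (vm_map_try_lock_read(map)) {
	/* ... inspect entries ... */
	vm_map_unlock_read(map);
} else {
	/* map lock contended: bail out or retry later */
}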
+
 #if MACH_ASSERT || DEBUG
 #define vm_map_lock_assert_held(map) \
        lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
@@ -578,6 +648,8 @@ extern kern_return_t vm_map_find_space(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                vm_map_entry_t          *o_entry);      /* OUT */
 
 extern void vm_map_clip_start(
@@ -645,7 +717,9 @@ extern vm_map_entry_t       vm_map_entry_insert(
                                boolean_t               permanent,
                                unsigned int            superpage_size,
                                boolean_t               clear_map_aligned,
-                               boolean_t               is_submap);
+                               boolean_t               is_submap,
+                               boolean_t               used_for_jit,
+                               int                     alias);
 
 
 /*
@@ -662,11 +736,6 @@ extern vm_map_entry_t      vm_map_entry_insert(
                                                /* Physical map associated
                                                 * with this address map */
 
-#define                vm_map_verify_done(map, version)    vm_map_unlock_read(map)
-                                               /* Operation that required
-                                                * a verified lookup is
-                                                * now complete */
-
 /*
  * Macros/functions for map residence counts and swapin/out of vm maps
  */
@@ -694,7 +763,7 @@ MACRO_BEGIN                                 \
        if (Map) {                              \
                lck_mtx_lock(&Map->s_lock);     \
                Map->res_count++;               \
-               Map->ref_count++;               \
+               Map->map_refcnt++;              \
                lck_mtx_unlock(&Map->s_lock);   \
        }                                       \
 MACRO_END
@@ -729,7 +798,7 @@ MACRO_END
 MACRO_BEGIN                            \
        vm_map_t Map = (map);           \
        lck_mtx_lock(&Map->s_lock);     \
-       ++Map->ref_count;               \
+       ++Map->map_refcnt;              \
        vm_map_res_reference(Map);      \
        lck_mtx_unlock(&Map->s_lock);   \
 MACRO_END
@@ -748,7 +817,7 @@ MACRO_BEGIN                                 \
        vm_map_t Map = (map);                   \
        if (Map) {                              \
                lck_mtx_lock(&Map->s_lock);     \
-               Map->ref_count++;               \
+               Map->map_refcnt++;              \
                lck_mtx_unlock(&Map->s_lock);   \
        }                                       \
 MACRO_END
@@ -788,7 +857,7 @@ extern vm_object_t  vm_submap_object;
 
 #define        vm_map_dealloc_fast(map)                \
        MACRO_BEGIN                                     \
-       register int c;                         \
+       int c;                                          \
                                                        \
        lck_mtx_lock(&map->s_lock);                     \
        c = --map->ref_count;                   \
@@ -831,6 +900,8 @@ extern kern_return_t        vm_map_enter(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                vm_object_t             object,
                                vm_object_offset_t      offset,
                                boolean_t               needs_copy,
@@ -838,6 +909,22 @@ extern kern_return_t       vm_map_enter(
                                vm_prot_t               max_protection,
                                vm_inherit_t            inheritance);
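Many of these in-kernel entry points (vm_map_enter(), vm_map_remap(), vm_map_enter_mem_object*(), vm_map_find_space(), ...) gain a vm_map_kernel_flags_t plus a vm_tag_t so mappings can carry kernel-only options and an allocation tag. A hedged sketch of a caller; the specific flag and tag values shown are just examples:

vm_map_offset_t	addr = 0;
kern_return_t	kr;

kr = vm_map_enter(map, &addr, size, (vm_map_offset_t)0 /* mask */,
		  VM_FLAGS_ANYWHERE,
		  VM_MAP_KERNEL_FLAGS_NONE,	/* no kernel-only behavior requested */
		  VM_KERN_MEMORY_NONE,		/* or a specific VM_KERN_MEMORY_* tag */
		  VM_OBJECT_NULL, 0, FALSE /* needs_copy */,
		  VM_PROT_DEFAULT, VM_PROT_ALL,
		  VM_INHERIT_DEFAULT);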
 
+#if __arm64__
+extern kern_return_t   vm_map_enter_fourk(
+                               vm_map_t                map,
+                               vm_map_offset_t         *address,
+                               vm_map_size_t           size,
+                               vm_map_offset_t         mask,
+                               int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
+                               vm_object_t             object,
+                               vm_object_offset_t      offset,
+                               boolean_t               needs_copy,
+                               vm_prot_t               cur_protection,
+                               vm_prot_t               max_protection,
+                               vm_inherit_t            inheritance);
+#endif /* __arm64__ */
 
 /* XXX should go away - replaced with regular enter of contig object */
 extern  kern_return_t  vm_map_enter_cpm(
@@ -852,6 +939,8 @@ extern kern_return_t vm_map_remap(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                vm_map_t                src_map,
                                vm_map_offset_t         memory_address,
                                boolean_t               copy,
@@ -878,7 +967,11 @@ extern     kern_return_t   vm_map_read_user(
 /* Create a new task map using an existing task map as a template. */
 extern vm_map_t                vm_map_fork(
                                ledger_t                ledger,
-                               vm_map_t                old_map);
+                               vm_map_t                old_map,
+                               int                     options);
+#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE      0x00000001
+#define VM_MAP_FORK_PRESERVE_PURGEABLE         0x00000002
+#define VM_MAP_FORK_CORPSE_FOOTPRINT           0x00000004
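The new options argument replaces what used to be implicit behavior; for instance, corpse generation is expected to request footprint collection (illustrative call only):

/* Sketch: fork a task map while collecting a corpse footprint. */
vm_map_t new_map;

new_map = vm_map_fork(ledger, old_map, VM_MAP_FORK_CORPSE_FOOTPRINT);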
 
 /* Change inheritance */
 extern kern_return_t   vm_map_inherit(
@@ -908,12 +1001,6 @@ extern kern_return_t      vm_map_behavior_set(
                                vm_map_offset_t         end,
                                vm_behavior_t           new_behavior);
 
-extern kern_return_t vm_map_purgable_control(
-                               vm_map_t                map,
-                               vm_map_offset_t         address,
-                               vm_purgable_t           control,
-                               int                     *state);
-
 extern kern_return_t vm_map_region(
                                vm_map_t                 map,
                                vm_map_offset_t         *address,
@@ -981,24 +1068,65 @@ extern kern_return_t vm_map_set_cache_attr(
 
 extern int override_nx(vm_map_t map, uint32_t user_tag);
 
-extern int vm_map_purge(vm_map_t map);
-
-
-/* kext exported versions */
-
-extern kern_return_t vm_map_wire_external(
-       register vm_map_t       map,
-       register vm_map_offset_t        start,
-       register vm_map_offset_t        end,
-       register vm_prot_t      caller_prot,
-       boolean_t               user_wire);
-
-extern kern_return_t vm_map_wire_and_extract_external(
+#if PMAP_CS
+extern kern_return_t vm_map_entry_cs_associate(
+       vm_map_t                map,
+       vm_map_entry_t          entry,
+       vm_map_kernel_flags_t   vmk_flags);
+#endif /* PMAP_CS */
+
+extern void vm_map_region_top_walk(
+        vm_map_entry_t entry,
+       vm_region_top_info_t top);
+extern void vm_map_region_walk(
+       vm_map_t map,
+       vm_map_offset_t va,
+       vm_map_entry_t entry,
+       vm_object_offset_t offset,
+       vm_object_size_t range,
+       vm_region_extended_info_t extended,
+       boolean_t look_for_pages,
+       mach_msg_type_number_t count);
+
+
+struct vm_map_corpse_footprint_header {
+       vm_size_t       cf_size;        /* allocated buffer size */
+       uint32_t        cf_last_region; /* offset of last region in buffer */
+       union {
+               uint32_t cfu_last_zeroes; /* during creation:
+                                         * number of "zero" dispositions at
+                                         * end of last region */
+               uint32_t cfu_hint_region; /* during lookup:
+                                         * offset of last looked up region */
+#define cf_last_zeroes cfu.cfu_last_zeroes
+#define cf_hint_region cfu.cfu_hint_region
+       } cfu;
+};
+struct vm_map_corpse_footprint_region {
+       vm_map_offset_t cfr_vaddr;      /* region start virtual address */
+       uint32_t        cfr_num_pages;  /* number of pages in this "region" */
+       unsigned char   cfr_disposition[0];     /* disposition of each page */
+} __attribute__((packed));
+
+extern kern_return_t vm_map_corpse_footprint_collect(
+       vm_map_t        old_map,
+       vm_map_entry_t  old_entry,
+       vm_map_t        new_map);
+extern void vm_map_corpse_footprint_collect_done(
+       vm_map_t        new_map);
+
+extern kern_return_t vm_map_corpse_footprint_query_page_info(
        vm_map_t        map,
-       vm_map_offset_t start,
-       vm_prot_t       caller_prot,
-       boolean_t       user_wire,
-       ppnum_t         *physpage_p);
+       vm_map_offset_t va,
+       int             *disp);
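These routines are meant to be used as a collect / done / query sequence: collect per source entry while the corpse map is being built, seal the buffer, then answer per-page disposition queries later. A condensed sketch:

/* Sketch of the expected sequence (locking and error handling omitted). */
vm_map_entry_t entry;

for (entry = vm_map_first_entry(old_map);
     entry != vm_map_to_entry(old_map);
     entry = entry->vme_next) {
	vm_map_corpse_footprint_collect(old_map, entry, new_map);
}
vm_map_corpse_footprint_collect_done(new_map);

/* later, when inspecting the corpse: */
int disp = 0;
vm_map_corpse_footprint_query_page_info(new_map, va, &disp);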
+
+extern void vm_map_copy_footprint_ledgers(
+       task_t  old_task,
+       task_t  new_task);
+extern void vm_map_copy_ledger(
+       task_t  old_task,
+       task_t  new_task,
+       int     ledger_entry);
 
 #endif /* MACH_KERNEL_PRIVATE */
 
@@ -1010,6 +1138,15 @@ extern vm_map_t          vm_map_create(
                                vm_map_offset_t         min_off,
                                vm_map_offset_t         max_off,
                                boolean_t               pageable);
+extern vm_map_t vm_map_create_options(
+       pmap_t                  pmap,
+       vm_map_offset_t         min_off,
+       vm_map_offset_t         max_off,
+       int                     options);
+#define VM_MAP_CREATE_PAGEABLE         0x00000001
+#define VM_MAP_CREATE_CORPSE_FOOTPRINT 0x00000002
+#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
+                                  VM_MAP_CREATE_CORPSE_FOOTPRINT)
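vm_map_create_options() generalizes vm_map_create(); for example, a map destined to describe a corpse would presumably be created with both options (sketch):

/* Sketch: pageable map that also reserves a corpse footprint buffer. */
vm_map_t corpse_map;

corpse_map = vm_map_create_options(pmap, min_off, max_off,
				   VM_MAP_CREATE_PAGEABLE |
				   VM_MAP_CREATE_CORPSE_FOOTPRINT);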
 
 extern void            vm_map_disable_hole_optimization(vm_map_t map);
 
@@ -1041,6 +1178,43 @@ extern boolean_t vm_map_check_protection(
                                vm_prot_t               protection);
 
 /* wire down a region */
+
+#ifdef XNU_KERNEL_PRIVATE
+
+extern kern_return_t   vm_map_wire_kernel(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_map_offset_t         end,
+                               vm_prot_t               access_type,
+                               vm_tag_t                tag,
+                               boolean_t               user_wire);
+
+extern kern_return_t   vm_map_wire_and_extract_kernel(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_prot_t               access_type,
+                               vm_tag_t                tag,
+                               boolean_t               user_wire,
+                               ppnum_t                 *physpage_p);
+
+/* kext exported versions */
+
+extern kern_return_t   vm_map_wire_external(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_map_offset_t         end,
+                               vm_prot_t               access_type,
+                               boolean_t               user_wire);
+
+extern kern_return_t   vm_map_wire_and_extract_external(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_prot_t               access_type,
+                               boolean_t               user_wire,
+                               ppnum_t                 *physpage_p);
+
+#else /* XNU_KERNEL_PRIVATE */
+
 extern kern_return_t   vm_map_wire(
                                vm_map_t                map,
                                vm_map_offset_t         start,
@@ -1055,6 +1229,8 @@ extern kern_return_t      vm_map_wire_and_extract(
                                boolean_t               user_wire,
                                ppnum_t                 *physpage_p);
 
+#endif /* !XNU_KERNEL_PRIVATE */
+
 /* unwire a region */
 extern kern_return_t   vm_map_unwire(
                                vm_map_t                map,
@@ -1062,6 +1238,8 @@ extern kern_return_t      vm_map_unwire(
                                vm_map_offset_t         end,
                                boolean_t               user_wire);
 
+#ifdef XNU_KERNEL_PRIVATE
+
 /* Enter a mapping of a memory object */
 extern kern_return_t   vm_map_enter_mem_object(
                                vm_map_t                map,
@@ -1069,6 +1247,8 @@ extern kern_return_t      vm_map_enter_mem_object(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                ipc_port_t              port,
                                vm_object_offset_t      offset,
                                boolean_t               needs_copy,
@@ -1083,6 +1263,8 @@ extern kern_return_t      vm_map_enter_mem_object_prefault(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                ipc_port_t              port,
                                vm_object_offset_t      offset,
                                vm_prot_t               cur_protection,
@@ -1097,6 +1279,8 @@ extern kern_return_t      vm_map_enter_mem_object_control(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                memory_object_control_t control,
                                vm_object_offset_t      offset,
                                boolean_t               needs_copy,
@@ -1104,6 +1288,8 @@ extern kern_return_t      vm_map_enter_mem_object_control(
                                vm_prot_t               max_protection,
                                vm_inherit_t            inheritance);
 
+#endif /* !XNU_KERNEL_PRIVATE */
+
 /* Deallocate a region */
 extern kern_return_t   vm_map_remove(
                                vm_map_t                map,
@@ -1111,6 +1297,13 @@ extern kern_return_t     vm_map_remove(
                                vm_map_offset_t         end,
                                boolean_t               flags);
 
+/* Deallocate a region when the map is already locked */
+extern kern_return_t   vm_map_remove_locked(
+                               vm_map_t        map,
+                               vm_map_offset_t     start,
+                               vm_map_offset_t     end,
+                               boolean_t       flags);
+
 /* Discard a copy without using it */
 extern void            vm_map_copy_discard(
                                vm_map_copy_t           copy);
@@ -1122,16 +1315,29 @@ extern kern_return_t    vm_map_copy_overwrite(
                                vm_map_copy_t           copy,
                                boolean_t               interruptible);
 
+/* returns TRUE if size of vm_map_copy == size parameter FALSE otherwise */
+extern boolean_t       vm_map_copy_validate_size(
+                               vm_map_t                dst_map,
+                               vm_map_copy_t           copy,
+                               vm_map_size_t           *size);
+
 /* Place a copy into a map */
 extern kern_return_t   vm_map_copyout(
                                vm_map_t                dst_map,
                                vm_map_address_t        *dst_addr,      /* OUT */
                                vm_map_copy_t           copy);
 
+extern kern_return_t vm_map_copyout_size(
+                               vm_map_t                dst_map,
+                               vm_map_address_t        *dst_addr,      /* OUT */
+                               vm_map_copy_t           copy,
+                               vm_map_size_t           copy_size);
+
 extern kern_return_t   vm_map_copyout_internal(
        vm_map_t                dst_map,
        vm_map_address_t        *dst_addr,      /* OUT */
        vm_map_copy_t           copy,
+       vm_map_size_t           copy_size,
        boolean_t               consume_on_success,
        vm_prot_t               cur_protection,
        vm_prot_t               max_protection,
@@ -1153,6 +1359,18 @@ extern kern_return_t     vm_map_copyin_common(
                                vm_map_copy_t           *copy_result,   /* OUT */
                                boolean_t               use_maxprot);
 
+#define VM_MAP_COPYIN_SRC_DESTROY      0x00000001
+#define VM_MAP_COPYIN_USE_MAXPROT      0x00000002
+#define VM_MAP_COPYIN_ENTRY_LIST       0x00000004
+#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
+#define VM_MAP_COPYIN_ALL_FLAGS                0x0000000F
+extern kern_return_t   vm_map_copyin_internal(
+                               vm_map_t                src_map,
+                               vm_map_address_t        src_addr,
+                               vm_map_size_t           len,
+                               int                     flags,
+                               vm_map_copy_t           *copy_result); /* OUT */
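vm_map_copyin_internal() folds the separate booleans of vm_map_copyin_common() into one flags word; a destructive copy-in would look roughly like this (sketch):

/* Sketch: copy a range out of src_map and tear down the source mapping. */
vm_map_copy_t	copy;
kern_return_t	kr;

kr = vm_map_copyin_internal(src_map, src_addr, len,
			    VM_MAP_COPYIN_SRC_DESTROY, &copy);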
+
 extern kern_return_t   vm_map_copy_extract(
        vm_map_t                src_map,
        vm_map_address_t        src_addr,
@@ -1174,12 +1392,23 @@ extern void             vm_map_set_64bit(
 extern void            vm_map_set_32bit(
                                vm_map_t                map);
 
+extern void            vm_map_set_jumbo(
+                               vm_map_t                map);
+
+extern void            vm_map_set_max_addr(
+                               vm_map_t                map, vm_map_offset_t new_max_offset);
+
 extern boolean_t       vm_map_has_hard_pagezero(
                                vm_map_t                map,
                                vm_map_offset_t         pagezero_size);
+extern void            vm_commit_pagezero_status(vm_map_t      tmap);
 
+#ifdef __arm__
+static inline boolean_t vm_map_is_64bit(__unused vm_map_t map) { return 0; }
+#else
 extern boolean_t       vm_map_is_64bit(
                                vm_map_t                map);
+#endif
 
 
 extern kern_return_t   vm_map_raise_max_offset(
@@ -1189,19 +1418,32 @@ extern kern_return_t    vm_map_raise_max_offset(
 extern kern_return_t   vm_map_raise_min_offset(
        vm_map_t        map,
        vm_map_offset_t new_min_offset);
+#if __x86_64__
+extern void vm_map_set_high_start(
+       vm_map_t        map,
+       vm_map_offset_t high_start);
+#endif /* __x86_64__ */
 
 extern vm_map_offset_t vm_compute_max_offset(
                                boolean_t               is64);
 
+extern void            vm_map_get_max_aslr_slide_section(
+                               vm_map_t                map,
+                               int64_t                 *max_sections,
+                               int64_t                 *section_size);
+
 extern uint64_t        vm_map_get_max_aslr_slide_pages(
                                vm_map_t map);
-       
+
+extern uint64_t        vm_map_get_max_loader_aslr_slide_pages(
+                               vm_map_t map);
+
 extern void            vm_map_set_user_wire_limit(
                                vm_map_t                map,
                                vm_size_t               limit);
 
 extern void vm_map_switch_protect(
-                               vm_map_t                map, 
+                               vm_map_t                map,
                                boolean_t               val);
 
 extern void vm_map_iokit_mapped_region(
@@ -1243,6 +1485,13 @@ extern kern_return_t vm_map_page_info(
        vm_page_info_flavor_t   flavor,
        vm_page_info_t          info,
        mach_msg_type_number_t  *count);
+extern kern_return_t vm_map_page_range_info_internal(
+       vm_map_t                map,
+       vm_map_offset_t         start_offset,
+       vm_map_offset_t         end_offset,
+       vm_page_info_flavor_t   flavor,
+       vm_page_info_t          info,
+       mach_msg_type_number_t  *count);
 #endif /* XNU_KERNEL_PRIVATE */
 
 
@@ -1281,6 +1530,21 @@ extern kern_return_t vm_map_page_info(
 #define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
 #define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0)
 
+static inline void vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
+{ 
+       switch (prot) {
+               case MAP_MEM_NOOP:              break;
+               case MAP_MEM_IO:                *wimg = VM_WIMG_IO; break;
+               case MAP_MEM_COPYBACK:          *wimg = VM_WIMG_USE_DEFAULT; break;
+               case MAP_MEM_INNERWBACK:        *wimg = VM_WIMG_INNERWBACK; break;
+               case MAP_MEM_POSTED:            *wimg = VM_WIMG_POSTED; break;
+               case MAP_MEM_WTHRU:             *wimg = VM_WIMG_WTHRU; break;
+               case MAP_MEM_WCOMB:             *wimg = VM_WIMG_WCOMB; break;
+               default:
+                       panic("Unrecognized mapping type %u\n", prot);
+       }
+}
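Callers typically start from VM_WIMG_USE_DEFAULT and let the MAP_MEM_* cache mode override it, which is why the MAP_MEM_NOOP case leaves *wimg untouched. Sketch; the "access" variable stands for an assumed caller-side cache-mode value:

/* Sketch: translate a named-entry cache mode into WIMG bits. */
unsigned int wimg_mode = VM_WIMG_USE_DEFAULT;

vm_prot_to_wimg(access, &wimg_mode);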
+
 #endif /* MACH_KERNEL_PRIVATE */
 
 #ifdef XNU_KERNEL_PRIVATE
@@ -1293,7 +1557,7 @@ extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
 /*
  * Flags for vm_map_remove() and vm_map_delete()
  */
-#define        VM_MAP_NO_FLAGS                 0x0
+#define        VM_MAP_REMOVE_NO_FLAGS          0x0
 #define        VM_MAP_REMOVE_KUNWIRE           0x1
 #define        VM_MAP_REMOVE_INTERRUPTIBLE     0x2
 #define        VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
@@ -1301,9 +1565,13 @@ extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
 #define VM_MAP_REMOVE_NO_PMAP_CLEANUP  0x10
 #define VM_MAP_REMOVE_NO_MAP_ALIGN     0x20
 #define VM_MAP_REMOVE_NO_UNNESTING     0x40
+#define VM_MAP_REMOVE_IMMUTABLE                0x80
+#define VM_MAP_REMOVE_GAPS_OK          0x100
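The VM_MAP_REMOVE_* values are OR'ed into the flags argument of vm_map_remove()/vm_map_remove_locked(); e.g. a kernel caller tearing down a wired range might pass (sketch):

/* Sketch: remove a kernel range, unwiring kernel wirings on the way out. */
kr = vm_map_remove(kernel_map,
		   vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(kernel_map)),
		   vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(kernel_map)),
		   VM_MAP_REMOVE_KUNWIRE);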
 
 /* Support for UPLs from vm_maps */
 
+#ifdef XNU_KERNEL_PRIVATE
+
 extern kern_return_t vm_map_get_upl(
                                vm_map_t                target_map,
                                vm_map_offset_t         map_offset,
@@ -1312,8 +1580,11 @@ extern kern_return_t vm_map_get_upl(
                                upl_page_info_array_t   page_info,
                                unsigned int            *page_infoCnt,
                                upl_control_flags_t     *flags,
+                               vm_tag_t                tag,
                                int                     force_data_sync);
 
+#endif /* XNU_KERNEL_PRIVATE */
+
 extern void
 vm_map_sizes(vm_map_t map,
                vm_map_size_t * psize,
@@ -1331,35 +1602,47 @@ extern kern_return_t vm_map_partial_reap(
                unsigned int *reclaimed_resident,
                unsigned int *reclaimed_compressed);
 
-#if CONFIG_FREEZE
-void   vm_map_freeze_thaw_init(void);
-void   vm_map_freeze_thaw(void);
-void   vm_map_demand_fault(void);
 
-extern kern_return_t vm_map_freeze_walk(
-               vm_map_t map,
-               unsigned int *purgeable_count,
-               unsigned int *wired_count,
-               unsigned int *clean_count,
-               unsigned int *dirty_count,
-               unsigned int dirty_budget,
-               boolean_t *has_shared);
+#if DEVELOPMENT || DEBUG
+
+extern int vm_map_disconnect_page_mappings(
+               vm_map_t map,
+               boolean_t);
+#endif
+
+
+#if CONFIG_FREEZE
 
 extern kern_return_t vm_map_freeze(
-               vm_map_t map,
+               vm_map_t     map,
                unsigned int *purgeable_count,
                unsigned int *wired_count,
                unsigned int *clean_count,
                unsigned int *dirty_count,
                unsigned int dirty_budget,
-               boolean_t *has_shared);
-                
-extern kern_return_t vm_map_thaw(
-                vm_map_t map);
+                unsigned int *shared_count,
+               int          *freezer_error_code,
+               boolean_t    eval_only);
+
+
+#define FREEZER_ERROR_GENERIC                  (-1)
+#define FREEZER_ERROR_EXCESS_SHARED_MEMORY     (-2)
+#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO (-3)
+#define FREEZER_ERROR_NO_COMPRESSOR_SPACE      (-4)
+#define FREEZER_ERROR_NO_SWAP_SPACE            (-5)
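vm_map_freeze() now reports why a freeze failed through freezer_error_code and can be run in evaluation-only mode; a sketch of a caller:

/* Sketch: evaluate freezability without actually compressing anything. */
unsigned int	purgeable, wired, clean, dirty, shared;
int		freezer_error_code = 0;
kern_return_t	kr;

kr = vm_map_freeze(map, &purgeable, &wired, &clean, &dirty,
		   dirty_budget, &shared, &freezer_error_code,
		   TRUE /* eval_only */);
if (kr != KERN_SUCCESS &&
    freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) {
	/* too much shared memory for freezing to pay off */
}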
+
 #endif
 
 __END_DECLS
 
+/*
+ * In some cases, we don't have a real VM object but still want to return a
+ * unique ID (to avoid a memory region looking like shared memory), so build
+ * a fake pointer based on the map's ledger and the index of the ledger being
+ * reported.
+ */
+#define INFO_MAKE_FAKE_OBJECT_ID(map,ledger_id)        ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))
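A region-info path could use it like this (sketch; the ledger index shown is an assumed example):

/* Sketch: synthesize a permuted, non-zero "object id" for a ledger-backed region. */
uint32_t fake_id;

fake_id = INFO_MAKE_FAKE_OBJECT_ID(map, ledger_index /* assumed index */);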
+
 #endif /* KERNEL_PRIVATE */
  
 #endif /* _VM_VM_MAP_H_ */