diff --git a/osfmk/vm/vm_map.h b/osfmk/vm/vm_map.h
index d8ab731e9157db01391f5424417b6ef21687647e..23592b8e4d01130c82b18f0873a3b3e45dfe31dd 100644
--- a/osfmk/vm/vm_map.h
+++ b/osfmk/vm/vm_map.h
@@ -93,6 +93,7 @@ extern vm_map_t current_map(void);
 extern kern_return_t   vm_map_exec(
                                vm_map_t                new_map,
                                task_t                  task,
+                               boolean_t               is64bit,
                                void                    *fsroot,
                                cpu_type_t              cpu);
 
@@ -105,7 +106,7 @@ __END_DECLS
 
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
-#include <kern/lock.h>
+#include <kern/locks.h>
 #include <kern/zalloc.h>
 #include <kern/macro_help.h>
 
@@ -138,11 +139,12 @@ typedef struct vm_map_entry       *vm_map_entry_t;
  *             memory object or a sub map (of the kernel map).
  */
 typedef union vm_map_object {
-       vm_object_t             vm_object;      /* object object */
-       vm_map_t                sub_map;        /* belongs to another map */
+       vm_object_t             vmo_object;     /* VM object */
+       vm_map_t                vmo_submap;     /* belongs to another map */
 } vm_map_object_t;
 
 #define named_entry_lock_init(object)  lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
+#define named_entry_lock_destroy(object)       lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
 #define named_entry_lock(object)               lck_mtx_lock(&(object)->Lock)
 #define named_entry_unlock(object)             lck_mtx_unlock(&(object)->Lock)   
 
@@ -168,17 +170,18 @@ struct vm_named_entry {
        decl_lck_mtx_data(,     Lock)           /* Synchronization */
        union {
                vm_object_t     object;         /* object I point to */
-               memory_object_t pager;          /* amo pager port */
                vm_map_t        map;            /* map backing submap */
+               vm_map_copy_t   copy;           /* a VM map copy */
        } backing;
        vm_object_offset_t      offset;         /* offset into object */
        vm_object_size_t        size;           /* size of region */
+       vm_object_offset_t      data_offset;    /* offset to first byte of data */
        vm_prot_t               protection;     /* access permissions */
        int                     ref_count;      /* Number of references */
        unsigned int                            /* Is backing.xxx : */
        /* boolean_t */         internal:1,     /* ... an internal object */
        /* boolean_t */         is_sub_map:1,   /* ... a submap? */
-       /* boolean_t */         is_pager:1;     /* ... a pager port */
+       /* boolean_t */         is_copy:1;      /* ... a VM map copy */
 };
 
 /*
@@ -202,6 +205,84 @@ struct vm_map_links {
        vm_map_offset_t         end;            /* end address */
 };
 
+/*
+ * IMPORTANT:
+ * The "alias" field can be updated while holding the VM map lock
+ * "shared".  It's OK as long as it's the only field that can be
+ * updated without the VM map "exclusive" lock.
+ */
+#define VME_OBJECT(entry) ((entry)->vme_object.vmo_object)
+#define VME_OBJECT_SET(entry, object)                          \
+       MACRO_BEGIN                                             \
+       (entry)->vme_object.vmo_object = (object);              \
+       MACRO_END
+#define VME_SUBMAP(entry) ((entry)->vme_object.vmo_submap)
+#define VME_SUBMAP_SET(entry, submap)                          \
+       MACRO_BEGIN                                             \
+       (entry)->vme_object.vmo_submap = (submap);              \
+       MACRO_END
+#define VME_OFFSET(entry) ((entry)->vme_offset & ~PAGE_MASK)
+#define VME_OFFSET_SET(entry, offset)          \
+       MACRO_BEGIN                             \
+       int __alias;                            \
+       __alias = VME_ALIAS((entry));           \
+       assert((offset & PAGE_MASK) == 0);      \
+       (entry)->vme_offset = offset | __alias; \
+       MACRO_END
+#define VME_OBJECT_SHADOW(entry, length)                       \
+       MACRO_BEGIN                                             \
+       vm_object_t             __object;                       \
+       vm_object_offset_t      __offset;                       \
+       __object = VME_OBJECT((entry));                         \
+       __offset = VME_OFFSET((entry));                         \
+       vm_object_shadow(&__object, &__offset, (length));       \
+       if (__object != VME_OBJECT((entry))) {                  \
+               VME_OBJECT_SET((entry), __object);              \
+               (entry)->use_pmap = TRUE;                       \
+       }                                                       \
+       if (__offset != VME_OFFSET((entry))) {                  \
+               VME_OFFSET_SET((entry), __offset);              \
+       }                                                       \
+       MACRO_END
+
+#define VME_ALIAS_MASK (PAGE_MASK)
+#define VME_ALIAS(entry) ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))
+#define VME_ALIAS_SET(entry, alias) \
+       MACRO_BEGIN                                                     \
+       vm_map_offset_t __offset;                                       \
+       __offset = VME_OFFSET((entry));                                 \
+       (entry)->vme_offset = __offset | ((alias) & VME_ALIAS_MASK);    \
+       MACRO_END
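
The macros above pack two things into the single vme_offset field: a page-aligned object offset in the high bits and a small "alias" tag in the low PAGE_MASK bits. A minimal userspace sketch of that packing, assuming 4 KB pages (PAGE_MASK == 0xFFF); the names here are illustrative, not from this header:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_MASK 0xFFFULL   /* stand-in for PAGE_MASK (4 KB pages) */

int main(void)
{
	uint64_t vme_offset = 0;

	/* VME_OFFSET_SET: the offset must be page aligned, alias preserved */
	uint64_t offset = 0x10000;
	assert((offset & SKETCH_PAGE_MASK) == 0);
	vme_offset = offset | (vme_offset & SKETCH_PAGE_MASK);

	/* VME_ALIAS_SET: the alias occupies the low PAGE_MASK bits */
	unsigned int alias = 42;
	vme_offset = (vme_offset & ~SKETCH_PAGE_MASK) | (alias & SKETCH_PAGE_MASK);

	/* VME_OFFSET / VME_ALIAS: mask the two halves back out */
	printf("offset=0x%llx alias=%u\n",
	    (unsigned long long)(vme_offset & ~SKETCH_PAGE_MASK),
	    (unsigned int)(vme_offset & SKETCH_PAGE_MASK));
	return 0;
}

This is also why VME_OFFSET_SET asserts page alignment: a misaligned offset would corrupt the alias stored in the low bits.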
+
+/*
+ * FOOTPRINT ACCOUNTING:
+ * The "memory footprint" is better described in the pmap layer.
+ *
+ * At the VM level, these 2 vm_map_entry_t fields are relevant:
+ * iokit_mapped:
+ *     For an "iokit_mapped" entry, we add the size of the entry to the
+ *     footprint when the entry is entered into the map and we subtract that
+ *     size when the entry is removed.  No other accounting should take place.
+ *     "use_pmap" should be FALSE but is not taken into account.
+ * use_pmap: (only when is_sub_map is FALSE)
+ *     This indicates if we should ask the pmap layer to account for pages
+ *     in this mapping.  If FALSE, we expect that another form of accounting
+ *     is being used (e.g. "iokit_mapped" or the explicit accounting of
+ *     non-volatile purgable memory).
+ *
+ * So the logic is mostly:
+ * if entry->is_sub_map == TRUE
+ *     anything in a submap does not count for the footprint
+ * else if entry->iokit_mapped == TRUE
+ *     footprint includes the entire virtual size of this entry
+ * else if entry->use_pmap == FALSE
+ *     tell pmap NOT to account for pages being pmap_enter()'d from this
+ *     mapping (i.e. use "alternate accounting")
+ * else
+ *     pmap will account for pages being pmap_enter()'d from this mapping
+ *     as it sees fit (only if anonymous, etc...)
+ */
+
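Restating the accounting rules above as a freestanding predicate may help; this is an illustrative sketch (the enum and function are not xnu code), with the three relevant entry bits taken as plain booleans:

#include <stdbool.h>

typedef enum {
	FOOTPRINT_NONE,          /* submap contents never count */
	FOOTPRINT_VIRTUAL_SIZE,  /* whole entry size added on insert, subtracted on remove */
	FOOTPRINT_ALTERNATE,     /* pmap told NOT to account ("alternate accounting") */
	FOOTPRINT_PMAP           /* pmap accounts as it sees fit */
} footprint_policy_t;

static footprint_policy_t
footprint_policy(bool is_sub_map, bool iokit_mapped, bool use_pmap)
{
	if (is_sub_map)
		return FOOTPRINT_NONE;
	if (iokit_mapped)
		return FOOTPRINT_VIRTUAL_SIZE;
	if (!use_pmap)
		return FOOTPRINT_ALTERNATE;
	return FOOTPRINT_PMAP;
}
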
 struct vm_map_entry {
        struct vm_map_links     links;          /* links to other entries */
 #define vme_prev               links.prev
@@ -210,36 +291,61 @@ struct vm_map_entry {
 #define vme_end                        links.end
 
        struct vm_map_store     store;
-       union vm_map_object     object;         /* object I point to */
-       vm_object_offset_t      offset;         /* offset into object */
+       union vm_map_object     vme_object;     /* object I point to */
+       vm_object_offset_t      vme_offset;     /* offset into object */
+
        unsigned int
-       /* boolean_t */         is_shared:1,    /* region is shared */
-       /* boolean_t */         is_sub_map:1,   /* Is "object" a submap? */
-       /* boolean_t */         in_transition:1, /* Entry being changed */
-       /* boolean_t */         needs_wakeup:1,  /* Waiters on in_transition */
-       /* vm_behavior_t */     behavior:2,     /* user paging behavior hint */
+       /* boolean_t */ is_shared:1,    /* region is shared */
+       /* boolean_t */ is_sub_map:1,   /* Is "object" a submap? */
+       /* boolean_t */ in_transition:1, /* Entry being changed */
+       /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */
+       /* vm_behavior_t */ behavior:2, /* user paging behavior hint */
                /* behavior is not defined for submap type */
-       /* boolean_t */         needs_copy:1,   /* object need to be copied? */
+       /* boolean_t */ needs_copy:1,   /* object need to be copied? */
+
                /* Only in task maps: */
-       /* vm_prot_t */         protection:3,   /* protection code */
-       /* vm_prot_t */         max_protection:3,/* maximum protection */
-       /* vm_inherit_t */      inheritance:2,  /* inheritance */
-       /* boolean_t */         use_pmap:1,     /* nested pmaps */
-       /*
-        * IMPORTANT:
-        * The "alias" field can be updated while holding the VM map lock
-        * "shared".  It's OK as along as it's the only field that can be
-        * updated without the VM map "exclusive" lock.
-        */
-       /* unsigned char */     alias:8,        /* user alias */
-       /* boolean_t */         no_cache:1,     /* should new pages be cached? */
-       /* boolean_t */         permanent:1,    /* mapping can not be removed */
-       /* boolean_t */         superpage_size:3,/* use superpages of a certain size */
-       /* boolean_t */         zero_wired_pages:1, /* zero out the wired pages of this entry it is being deleted without unwiring them */
-       /* boolean_t */         used_for_jit:1,
-       /* unsigned char */     pad:1;          /* available bits */
+       /* vm_prot_t */ protection:3,   /* protection code */
+       /* vm_prot_t */ max_protection:3, /* maximum protection */
+       /* vm_inherit_t */ inheritance:2, /* inheritance */
+       /* boolean_t */ use_pmap:1,     /*
+                                        * use_pmap is overloaded:
+                                        * if "is_sub_map":
+                                        *      use a nested pmap?
+                                        * else (i.e. if object):
+                                        *      use pmap accounting
+                                        *      for footprint?
+                                        */
+       /* boolean_t */ no_cache:1,     /* should new pages be cached? */
+       /* boolean_t */ permanent:1,    /* mapping can not be removed */
+       /* boolean_t */ superpage_size:1, /* use superpages of a certain size */
+       /* boolean_t */ map_aligned:1,  /* align to map's page size */
+       /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of
+                                            * this entry if it is being
+                                            * deleted without unwiring them */
+       /* boolean_t */ used_for_jit:1,
+       /* boolean_t */ from_reserved_zone:1, /* Allocated from
+                                              * kernel reserved zone    */
+
+       /* iokit accounting: use the virtual size rather than resident size: */
+       /* boolean_t */ iokit_acct:1,
+       /* boolean_t */ vme_resilient_codesign:1,
+       /* boolean_t */ vme_resilient_media:1,
+       /* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */
+               __unused:5;
+
        unsigned short          wired_count;    /* can be paged if = 0 */
        unsigned short          user_wired_count; /* for vm_wire */
+#if    DEBUG
+#define        MAP_ENTRY_CREATION_DEBUG (1)
+#define MAP_ENTRY_INSERTION_DEBUG (1)
+#endif 
+#if    MAP_ENTRY_CREATION_DEBUG
+       struct vm_map_header    *vme_creation_maphdr;
+       uintptr_t               vme_creation_bt[16];
+#endif
+#if    MAP_ENTRY_INSERTION_DEBUG
+       uintptr_t               vme_insertion_bt[16];
+#endif
 };
 
 /*
@@ -272,12 +379,16 @@ struct vm_map_header {
        int                     nentries;       /* Number of entries */
        boolean_t               entries_pageable;
                                                /* are map entries pageable? */
-       vm_map_offset_t         highest_entry_end_addr; /* The ending address of the highest allocated vm_entry_t */
 #ifdef VM_MAP_STORE_USE_RB
        struct rb_head  rb_head_store;
 #endif
+       int                     page_shift;     /* page shift */
 };
 
+#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
+#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
+#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
+
 /*
  *     Type:           vm_map_t [exported; contents invisible]
  *
@@ -294,15 +405,35 @@ struct vm_map_header {
  *             quickly find free space.
  */
 struct _vm_map {
-       lock_t                  lock;           /* uni- and smp-lock */
+       lck_rw_t                        lock;           /* map lock */
        struct vm_map_header    hdr;            /* Map entry header */
 #define min_offset             hdr.links.start /* start of range */
 #define max_offset             hdr.links.end   /* end of range */
-#define highest_entry_end      hdr.highest_entry_end_addr
        pmap_t                  pmap;           /* Physical map */
        vm_map_size_t           size;           /* virtual size */
        vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
        vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
+#if __x86_64__
+       vm_map_offset_t         vmmap_high_start;
+#endif /* __x86_64__ */
+
+       union {
+               /*
+                * If map->disable_vmentry_reuse == TRUE:
+                * the end address of the highest allocated vm_map_entry_t.
+                */
+               vm_map_offset_t         vmu1_highest_entry_end;
+               /*
+                * For a nested VM map:
+                * the lowest address in this nested VM map that we would
+                * expect to be unnested under normal operation (i.e. for
+                * regular copy-on-write on DATA section).
+                */
+               vm_map_offset_t         vmu1_lowest_unnestable_start;
+       } vmu1;
+#define highest_entry_end      vmu1.vmu1_highest_entry_end
+#define lowest_unnestable_start        vmu1.vmu1_lowest_unnestable_start
+
        int                     ref_count;      /* Reference count */
 #if    TASK_SWAPPER
        int                     res_count;      /* Residence count (swap) */
@@ -311,21 +442,30 @@ struct _vm_map {
        decl_lck_mtx_data(,     s_lock)         /* Lock ref, res fields */
        lck_mtx_ext_t           s_lock_ext;
        vm_map_entry_t          hint;           /* hint for quick lookups */
-       vm_map_entry_t          first_free;     /* First free space hint */
+       struct vm_map_links*    hole_hint;      /* hint for quick hole lookups */
+       union{
+               vm_map_entry_t          _first_free;    /* First free space hint */
+               struct vm_map_links*    _holes;         /* links all holes between entries */
+       }f_s;                                           /* Union for free space data structures being used */
+
+#define first_free             f_s._first_free
+#define holes_list             f_s._holes
+
        unsigned int            
        /* boolean_t */         wait_for_space:1, /* Should callers wait for space? */
        /* boolean_t */         wiring_required:1, /* All memory wired? */
        /* boolean_t */         no_zero_fill:1, /*No zero fill absent pages */
-       /* boolean_t */         mapped:1, /*has this map been mapped */
+       /* boolean_t */         mapped_in_other_pmaps:1, /*has this submap been mapped in maps that use a different pmap */
        /* boolean_t */         switch_protect:1, /*  Protect map from write faults while switched */
        /* boolean_t */         disable_vmentry_reuse:1, /*  All vm entries should keep using newer and higher addresses in the map */ 
        /* boolean_t */         map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
-       /* reserved */          pad:25;
+       /* boolean_t */         holelistenabled:1,
+       /* boolean_t */         is_nested_map:1,
+       /* boolean_t */         map_disallow_new_exec:1, /* Disallow new executable code */
+       /* reserved */          pad:22;
        unsigned int            timestamp;      /* Version number */
        unsigned int            color_rr;       /* next color (not protected by a lock) */
-#if CONFIG_FREEZE
-       void                    *default_freezer_toc;
-#endif
+
        boolean_t               jit_entry_exists;
 } ;
 
@@ -387,7 +527,11 @@ typedef struct vm_map_version {
  *
  *             The third format is a kernel buffer copy object - for data
  *             small enough that physical copies were the most efficient
- *             method.
+ *             method. This method uses a zero-sized array unioned with
+ *             other format-specific data in the 'c_u' member. This unsized
+ *             array overlaps the other elements and allows us to use this
+ *             extra structure space for physical memory copies. On 64-bit
+ *             systems this saves ~64 bytes per vm_map_copy.
  */
 
 struct vm_map_copy {
@@ -398,12 +542,9 @@ struct vm_map_copy {
        vm_object_offset_t      offset;
        vm_map_size_t           size;
        union {
-           struct vm_map_header        hdr;    /* ENTRY_LIST */
-           vm_object_t                 object; /* OBJECT */
-           struct {                            
-               void                    *kdata;       /* KERNEL_BUFFER */
-               vm_size_t               kalloc_size;  /* size of this copy_t */
-           } c_k;
+               struct vm_map_header    hdr;      /* ENTRY_LIST */
+               vm_object_t             object;   /* OBJECT */
+               uint8_t                 kdata[0]; /* KERNEL_BUFFER */
        } c_u;
 };
 
@@ -411,10 +552,12 @@ struct vm_map_copy {
 #define cpy_hdr                        c_u.hdr
 
 #define cpy_object             c_u.object
+#define cpy_kdata              c_u.kdata
+#define cpy_kdata_hdr_sz       (offsetof(struct vm_map_copy, c_u.kdata))
 
-#define cpy_kdata              c_u.c_k.kdata
-#define cpy_kalloc_size                c_u.c_k.kalloc_size
-
+#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
+#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
+#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)
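
The KERNEL_BUFFER layout described earlier relies on kdata[] being a zero-sized array that overlays the rest of the c_u union, so header and payload come from a single allocation of cpy_kdata_hdr_sz + size bytes. A userspace sketch of the same trick, under stated assumptions (malloc stands in for the kernel allocator; the struct and names are illustrative, not xnu's):

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct copy_sketch {
	int	type;			/* ENTRY_LIST, OBJECT, or KERNEL_BUFFER */
	size_t	size;
	union {
		struct { void *p1, *p2; } hdr;	/* stands in for the other formats */
		unsigned char kdata[0];		/* KERNEL_BUFFER payload overlays them */
	} c_u;
};

#define COPY_HDR_SZ offsetof(struct copy_sketch, c_u.kdata)

static struct copy_sketch *
copy_alloc_kernel_buffer(const void *data, size_t len)
{
	/* one allocation covers the header up to kdata[] plus the payload;
	 * the union's own bytes become the first bytes of the payload */
	struct copy_sketch *copy = malloc(COPY_HDR_SZ + len);
	if (copy == NULL)
		return NULL;
	copy->type = 3;		/* KERNEL_BUFFER */
	copy->size = len;
	memcpy(copy->c_u.kdata, data, len);
	return copy;
}

Because the payload reuses the union's storage instead of sitting after it, each small copy saves the size of the largest union member, which is the ~64 bytes per vm_map_copy mentioned above.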
 
 /*
  *     Useful macros for entry list copy objects
@@ -437,26 +580,47 @@ struct vm_map_copy {
 
 #define vm_map_lock_init(map)                                          \
        ((map)->timestamp = 0 ,                                         \
-       lock_init(&(map)->lock, TRUE, 0, 0))
+       lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))
 
-#define vm_map_lock(map)               lock_write(&(map)->lock)
+#define vm_map_lock(map)               lck_rw_lock_exclusive(&(map)->lock)
 #define vm_map_unlock(map)                                             \
-               ((map)->timestamp++ ,   lock_write_done(&(map)->lock))
-#define vm_map_lock_read(map)          lock_read(&(map)->lock)
-#define vm_map_unlock_read(map)                lock_read_done(&(map)->lock)
+               ((map)->timestamp++ ,   lck_rw_done(&(map)->lock))
+#define vm_map_lock_read(map)          lck_rw_lock_shared(&(map)->lock)
+#define vm_map_unlock_read(map)                lck_rw_done(&(map)->lock)
 #define vm_map_lock_write_to_read(map)                                 \
-               ((map)->timestamp++ ,   lock_write_to_read(&(map)->lock))
+               ((map)->timestamp++ ,   lck_rw_lock_exclusive_to_shared(&(map)->lock))
 /* lck_rw_lock_shared_to_exclusive() returns FALSE on failure.  Macro evaluates to
  * zero on success and non-zero value on failure.
  */
-#define vm_map_lock_read_to_write(map) (lock_read_to_write(&(map)->lock) != TRUE)
+#define vm_map_lock_read_to_write(map) (lck_rw_lock_shared_to_exclusive(&(map)->lock) != TRUE)
+
+#define vm_map_try_lock(map)           lck_rw_try_lock_exclusive(&(map)->lock)
+#define vm_map_try_lock_read(map)      lck_rw_try_lock_shared(&(map)->lock)
+
+#if MACH_ASSERT || DEBUG
+#define vm_map_lock_assert_held(map) \
+       lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
+#define vm_map_lock_assert_shared(map) \
+       lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
+#define vm_map_lock_assert_exclusive(map) \
+       lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
+#define vm_map_lock_assert_notheld(map) \
+       lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
+#else  /* MACH_ASSERT || DEBUG */ 
+#define vm_map_lock_assert_held(map)
+#define vm_map_lock_assert_shared(map)
+#define vm_map_lock_assert_exclusive(map)
+#define vm_map_lock_assert_notheld(map)
+#endif /* MACH_ASSERT || DEBUG */
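
One subtlety worth calling out: vm_map_lock_read_to_write() is built on lck_rw_lock_shared_to_exclusive(), which releases the shared lock when the upgrade fails. A failed upgrade therefore leaves the map unlocked, and the caller must re-take the lock and redo any lookups. An illustrative caller pattern (a sketch, not code from this file):

static void
modify_map_after_lookup(vm_map_t map)
{
	vm_map_lock_read(map);
	for (;;) {
		/* ... lookups under the shared lock ... */
		if (vm_map_lock_read_to_write(map) == 0) {
			break;			/* upgrade succeeded: now held exclusive */
		}
		vm_map_lock_read(map);		/* upgrade failed: the lock was dropped */
		/* ... the map may have changed; redo the lookups ... */
	}
	/* ... mutate the map under the exclusive lock ... */
	vm_map_unlock(map);
}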
 
 /*
  *     Exported procedures that operate on vm_map_t.
  */
 
 /* Initialize the module */
-extern void            vm_map_init(void) __attribute__((section("__TEXT, initcode")));
+extern void            vm_map_init(void);
+
+extern void            vm_kernel_reserved_entry_init(void);
 
 /* Allocate a range in the specified virtual address map and
  * return the entry allocated for that range. */
@@ -466,6 +630,8 @@ extern kern_return_t vm_map_find_space(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                vm_map_entry_t          *o_entry);      /* OUT */
 
 extern void vm_map_clip_start(
@@ -476,10 +642,8 @@ extern void vm_map_clip_end(
        vm_map_t        map,
        vm_map_entry_t  entry,
        vm_map_offset_t endaddr);
-#if !CONFIG_EMBEDDED
 extern boolean_t vm_map_entry_should_cow_for_true_share(
        vm_map_entry_t  entry);
-#endif /* !CONFIG_EMBEDDED */
 
 /* Look up the map entry containing, or the one immediately preceding, the specified address in the given map */
 extern boolean_t       vm_map_lookup_entry(
@@ -487,6 +651,15 @@ extern boolean_t   vm_map_lookup_entry(
                                vm_map_address_t        address,
                                vm_map_entry_t          *entry);        /* OUT */
 
+extern void            vm_map_copy_remap(
+       vm_map_t                map,
+       vm_map_entry_t          where,
+       vm_map_copy_t           copy,
+       vm_map_offset_t         adjustment,
+       vm_prot_t               cur_prot,
+       vm_prot_t               max_prot,
+       vm_inherit_t            inheritance);
+
 /* Find the VM object, offset, and protection for a given virtual address
  * in the specified map, assuming a page fault of the  type specified. */
 extern kern_return_t   vm_map_lookup_locked(
@@ -524,7 +697,11 @@ extern vm_map_entry_t      vm_map_entry_insert(
                                unsigned                wired_count,
                                boolean_t               no_cache,
                                boolean_t               permanent,
-                               unsigned int            superpage_size);
+                               unsigned int            superpage_size,
+                               boolean_t               clear_map_aligned,
+                               boolean_t               is_submap,
+                               boolean_t               used_for_jit,
+                               int                     alias);
 
 
 /*
@@ -541,11 +718,6 @@ extern vm_map_entry_t      vm_map_entry_insert(
                                                /* Physical map associated
                                                 * with this address map */
 
-#define                vm_map_verify_done(map, version)    vm_map_unlock_read(map)
-                                               /* Operation that required
-                                                * a verified lookup is
-                                                * now complete */
-
 /*
  * Macros/functions for map residence counts and swapin/out of vm maps
  */
@@ -649,8 +821,8 @@ extern vm_object_t  vm_submap_object;
  */
 #define vm_map_entry_wait(map, interruptible)          \
        ((map)->timestamp++ ,                           \
-        thread_sleep_lock_write((event_t)&(map)->hdr,  \
-                        &(map)->lock, interruptible))
+        lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
+                                 (event_t)&(map)->hdr, interruptible))
 
 
 #define vm_map_entry_wakeup(map)        \
@@ -667,7 +839,7 @@ extern vm_object_t  vm_submap_object;
 
 #define        vm_map_dealloc_fast(map)                \
        MACRO_BEGIN                                     \
-       register int c;                         \
+       int c;                                          \
                                                        \
        lck_mtx_lock(&map->s_lock);                     \
        c = --map->ref_count;                   \
@@ -698,6 +870,11 @@ extern kern_return_t       vm_map_copyin_object(
                                vm_object_size_t        size,
                                vm_map_copy_t           *copy_result); /* OUT */
 
+extern kern_return_t   vm_map_random_address_for_size(
+                               vm_map_t        map,
+                               vm_map_offset_t *address,
+                               vm_map_size_t   size);
+
 /* Enter a mapping */
 extern kern_return_t   vm_map_enter(
                                vm_map_t                map,
@@ -705,6 +882,8 @@ extern kern_return_t        vm_map_enter(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                vm_object_t             object,
                                vm_object_offset_t      offset,
                                boolean_t               needs_copy,
@@ -712,6 +891,23 @@ extern kern_return_t       vm_map_enter(
                                vm_prot_t               max_protection,
                                vm_inherit_t            inheritance);
 
+#if __arm64__
+extern kern_return_t   vm_map_enter_fourk(
+                               vm_map_t                map,
+                               vm_map_offset_t         *address,
+                               vm_map_size_t           size,
+                               vm_map_offset_t         mask,
+                               int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
+                               vm_object_t             object,
+                               vm_object_offset_t      offset,
+                               boolean_t               needs_copy,
+                               vm_prot_t               cur_protection,
+                               vm_prot_t               max_protection,
+                               vm_inherit_t            inheritance);
+#endif /* __arm64__ */
+
 /* XXX should go away - replaced with regular enter of contig object */
 extern  kern_return_t  vm_map_enter_cpm(
                                vm_map_t                map,
@@ -725,6 +921,8 @@ extern kern_return_t vm_map_remap(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                vm_map_t                src_map,
                                vm_map_offset_t         memory_address,
                                boolean_t               copy,
@@ -750,7 +948,11 @@ extern     kern_return_t   vm_map_read_user(
 
 /* Create a new task map using an existing task map as a template. */
 extern vm_map_t                vm_map_fork(
-                               vm_map_t                old_map);
+                               ledger_t                ledger,
+                               vm_map_t                old_map,
+                               int                     options);
+#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE      0x00000001
+#define VM_MAP_FORK_PRESERVE_PURGEABLE         0x00000002
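
A hypothetical call site combining the new ledger parameter with the option bits (the ledger variable here is assumed, not something defined in this header):

	vm_map_t new_map;

	new_map = vm_map_fork(ledger, old_map,
	    VM_MAP_FORK_SHARE_IF_INHERIT_NONE | VM_MAP_FORK_PRESERVE_PURGEABLE);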
 
 /* Change inheritance */
 extern kern_return_t   vm_map_inherit(
@@ -780,12 +982,6 @@ extern kern_return_t       vm_map_behavior_set(
                                vm_map_offset_t         end,
                                vm_behavior_t           new_behavior);
 
-extern kern_return_t vm_map_purgable_control(
-                               vm_map_t                map,
-                               vm_map_offset_t         address,
-                               vm_purgable_t           control,
-                               int                     *state);
-
 extern kern_return_t vm_map_region(
                                vm_map_t                 map,
                                vm_map_offset_t         *address,
@@ -809,6 +1005,13 @@ extern kern_return_t vm_map_page_query_internal(
                                int                     *disposition,
                                int                     *ref_count);
 
+extern kern_return_t vm_map_query_volatile(
+       vm_map_t        map,
+       mach_vm_size_t  *volatile_virtual_size_p,
+       mach_vm_size_t  *volatile_resident_size_p,
+       mach_vm_size_t  *volatile_compressed_size_p,
+       mach_vm_size_t  *volatile_pmap_size_p,
+       mach_vm_size_t  *volatile_compressed_pmap_size_p);
 
 extern kern_return_t   vm_map_submap(
                                vm_map_t                map,
@@ -846,6 +1049,19 @@ extern kern_return_t vm_map_set_cache_attr(
 
 extern int override_nx(vm_map_t map, uint32_t user_tag);
 
+extern void vm_map_region_top_walk(
+        vm_map_entry_t entry,
+       vm_region_top_info_t top);
+extern void vm_map_region_walk(
+       vm_map_t map,
+       vm_map_offset_t va,
+       vm_map_entry_t entry,
+       vm_object_offset_t offset,
+       vm_object_size_t range,
+       vm_region_extended_info_t extended,
+       boolean_t look_for_pages,
+       mach_msg_type_number_t count);
+
 #endif /* MACH_KERNEL_PRIVATE */
 
 __BEGIN_DECLS
@@ -857,6 +1073,8 @@ extern vm_map_t            vm_map_create(
                                vm_map_offset_t         max_off,
                                boolean_t               pageable);
 
+extern void            vm_map_disable_hole_optimization(vm_map_t map);
+
 /* Get rid of a map */
 extern void            vm_map_destroy(
                                vm_map_t                map,
@@ -885,6 +1103,43 @@ extern boolean_t vm_map_check_protection(
                                vm_prot_t               protection);
 
 /* wire down a region */
+
+#ifdef XNU_KERNEL_PRIVATE
+
+extern kern_return_t   vm_map_wire_kernel(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_map_offset_t         end,
+                               vm_prot_t               access_type,
+                               vm_tag_t                tag,
+                               boolean_t               user_wire);
+
+extern kern_return_t   vm_map_wire_and_extract_kernel(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_prot_t               access_type,
+                               vm_tag_t                tag,
+                               boolean_t               user_wire,
+                               ppnum_t                 *physpage_p);
+
+/* kext exported versions */
+
+extern kern_return_t   vm_map_wire_external(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_map_offset_t         end,
+                               vm_prot_t               access_type,
+                               boolean_t               user_wire);
+
+extern kern_return_t   vm_map_wire_and_extract_external(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_prot_t               access_type,
+                               boolean_t               user_wire,
+                               ppnum_t                 *physpage_p);
+
+#else /* XNU_KERNEL_PRIVATE */
+
 extern kern_return_t   vm_map_wire(
                                vm_map_t                map,
                                vm_map_offset_t         start,
@@ -892,6 +1147,15 @@ extern kern_return_t      vm_map_wire(
                                vm_prot_t               access_type,
                                boolean_t               user_wire);
 
+extern kern_return_t   vm_map_wire_and_extract(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_prot_t               access_type,
+                               boolean_t               user_wire,
+                               ppnum_t                 *physpage_p);
+
+#endif /* !XNU_KERNEL_PRIVATE */
+
 /* unwire a region */
 extern kern_return_t   vm_map_unwire(
                                vm_map_t                map,
@@ -899,6 +1163,8 @@ extern kern_return_t       vm_map_unwire(
                                vm_map_offset_t         end,
                                boolean_t               user_wire);
 
+#ifdef XNU_KERNEL_PRIVATE
+
 /* Enter a mapping of a memory object */
 extern kern_return_t   vm_map_enter_mem_object(
                                vm_map_t                map,
@@ -906,6 +1172,8 @@ extern kern_return_t       vm_map_enter_mem_object(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                ipc_port_t              port,
                                vm_object_offset_t      offset,
                                boolean_t               needs_copy,
@@ -913,6 +1181,22 @@ extern kern_return_t      vm_map_enter_mem_object(
                                vm_prot_t               max_protection,
                                vm_inherit_t            inheritance);
 
+/* Enter a mapping of a memory object */
+extern kern_return_t   vm_map_enter_mem_object_prefault(
+                               vm_map_t                map,
+                               vm_map_offset_t         *address,
+                               vm_map_size_t           size,
+                               vm_map_offset_t         mask,
+                               int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
+                               ipc_port_t              port,
+                               vm_object_offset_t      offset,
+                               vm_prot_t               cur_protection,
+                               vm_prot_t               max_protection,
+                               upl_page_list_ptr_t     page_list,
+                               unsigned int            page_list_count);
+
 /* Enter a mapping of a memory object */
 extern kern_return_t   vm_map_enter_mem_object_control(
                                vm_map_t                map,
@@ -920,6 +1204,8 @@ extern kern_return_t       vm_map_enter_mem_object_control(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                memory_object_control_t control,
                                vm_object_offset_t      offset,
                                boolean_t               needs_copy,
@@ -927,6 +1213,8 @@ extern kern_return_t       vm_map_enter_mem_object_control(
                                vm_prot_t               max_protection,
                                vm_inherit_t            inheritance);
 
+#endif /* !XNU_KERNEL_PRIVATE */
+
 /* Deallocate a region */
 extern kern_return_t   vm_map_remove(
                                vm_map_t                map,
@@ -934,6 +1222,13 @@ extern kern_return_t      vm_map_remove(
                                vm_map_offset_t         end,
                                boolean_t               flags);
 
+/* Deallocate a region when the map is already locked */
+extern kern_return_t   vm_map_remove_locked(
+                               vm_map_t        map,
+                               vm_map_offset_t     start,
+                               vm_map_offset_t     end,
+                               boolean_t       flags);
+
 /* Discard a copy without using it */
 extern void            vm_map_copy_discard(
                                vm_map_copy_t           copy);
@@ -945,12 +1240,34 @@ extern kern_return_t     vm_map_copy_overwrite(
                                vm_map_copy_t           copy,
                                boolean_t               interruptible);
 
+/* Returns TRUE if the size of the vm_map_copy equals the "size" parameter, FALSE otherwise */
+extern boolean_t       vm_map_copy_validate_size(
+                               vm_map_t                dst_map,
+                               vm_map_copy_t           copy,
+                               vm_map_size_t           *size);
+
 /* Place a copy into a map */
 extern kern_return_t   vm_map_copyout(
                                vm_map_t                dst_map,
                                vm_map_address_t        *dst_addr,      /* OUT */
                                vm_map_copy_t           copy);
 
+extern kern_return_t vm_map_copyout_size(
+                               vm_map_t                dst_map,
+                               vm_map_address_t        *dst_addr,      /* OUT */
+                               vm_map_copy_t           copy,
+                               vm_map_size_t           copy_size);
+
+extern kern_return_t   vm_map_copyout_internal(
+       vm_map_t                dst_map,
+       vm_map_address_t        *dst_addr,      /* OUT */
+       vm_map_copy_t           copy,
+       vm_map_size_t           copy_size,
+       boolean_t               consume_on_success,
+       vm_prot_t               cur_protection,
+       vm_prot_t               max_protection,
+       vm_inherit_t            inheritance);
+
 extern kern_return_t   vm_map_copyin(
                                vm_map_t                        src_map,
                                vm_map_address_t        src_addr,
@@ -967,6 +1284,27 @@ extern kern_return_t      vm_map_copyin_common(
                                vm_map_copy_t           *copy_result,   /* OUT */
                                boolean_t               use_maxprot);
 
+#define VM_MAP_COPYIN_SRC_DESTROY      0x00000001
+#define VM_MAP_COPYIN_USE_MAXPROT      0x00000002
+#define VM_MAP_COPYIN_ENTRY_LIST       0x00000004
+#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
+#define VM_MAP_COPYIN_ALL_FLAGS                0x0000000F
+extern kern_return_t   vm_map_copyin_internal(
+                               vm_map_t                src_map,
+                               vm_map_address_t        src_addr,
+                               vm_map_size_t           len,
+                               int                     flags,
+                               vm_map_copy_t           *copy_result); /* OUT */
+
+extern kern_return_t   vm_map_copy_extract(
+       vm_map_t                src_map,
+       vm_map_address_t        src_addr,
+       vm_map_size_t           len,
+       vm_map_copy_t           *copy_result,   /* OUT */
+       vm_prot_t               *cur_prot,      /* OUT */
+       vm_prot_t               *max_prot);
+
+
 extern void            vm_map_disable_NX(
                                vm_map_t                map);
 
@@ -979,35 +1317,89 @@ extern void              vm_map_set_64bit(
 extern void            vm_map_set_32bit(
                                vm_map_t                map);
 
-extern boolean_t       vm_map_is_64bit(
+extern void            vm_map_set_jumbo(
                                vm_map_t                map);
 
-extern boolean_t       vm_map_has_4GB_pagezero(
-                               vm_map_t                map);
+extern boolean_t       vm_map_has_hard_pagezero(
+                               vm_map_t                map,
+                               vm_map_offset_t         pagezero_size);
+extern void            vm_commit_pagezero_status(vm_map_t      tmap);
 
-extern void            vm_map_set_4GB_pagezero(
+#ifdef __arm__
+static inline boolean_t vm_map_is_64bit(__unused vm_map_t map) { return 0; }
+#else
+extern boolean_t       vm_map_is_64bit(
                                vm_map_t                map);
+#endif
 
-extern void            vm_map_clear_4GB_pagezero(
-                               vm_map_t                map);
+
+extern kern_return_t   vm_map_raise_max_offset(
+       vm_map_t        map,
+       vm_map_offset_t new_max_offset);
 
 extern kern_return_t   vm_map_raise_min_offset(
        vm_map_t        map,
        vm_map_offset_t new_min_offset);
+#if __x86_64__
+extern void vm_map_set_high_start(
+       vm_map_t        map,
+       vm_map_offset_t high_start);
+#endif /* __x86_64__ */
 
 extern vm_map_offset_t vm_compute_max_offset(
-                               unsigned                is64);
+                               boolean_t               is64);
+
+extern void            vm_map_get_max_aslr_slide_section(
+                               vm_map_t                map,
+                               int64_t                 *max_sections,
+                               int64_t                 *section_size);
+
+extern uint64_t        vm_map_get_max_aslr_slide_pages(
+                               vm_map_t map);
+
+extern uint64_t        vm_map_get_max_loader_aslr_slide_pages(
+                               vm_map_t map);
 
 extern void            vm_map_set_user_wire_limit(
                                vm_map_t                map,
                                vm_size_t               limit);
 
 extern void vm_map_switch_protect(
-                               vm_map_t                map, 
+                               vm_map_t                map,
                                boolean_t               val);
 
+extern void vm_map_iokit_mapped_region(
+                               vm_map_t                map,
+                               vm_size_t               bytes);
+
+extern void vm_map_iokit_unmapped_region(
+                               vm_map_t                map,
+                               vm_size_t               bytes);
+
+
 extern boolean_t first_free_is_valid(vm_map_t);
 
+extern int             vm_map_page_shift(
+                               vm_map_t                map);
+
+extern vm_map_offset_t vm_map_page_mask(
+                               vm_map_t                map);
+
+extern int             vm_map_page_size(
+                               vm_map_t                map);
+
+extern vm_map_offset_t vm_map_round_page_mask(
+                               vm_map_offset_t         offset,
+                               vm_map_offset_t         mask);
+
+extern vm_map_offset_t vm_map_trunc_page_mask(
+                               vm_map_offset_t         offset,
+                               vm_map_offset_t         mask);
+
+extern boolean_t       vm_map_page_aligned(
+                               vm_map_offset_t         offset,
+                               vm_map_offset_t         mask);
+
 #ifdef XNU_KERNEL_PRIVATE
 extern kern_return_t vm_map_page_info(
        vm_map_t                map,
@@ -1015,6 +1407,13 @@ extern kern_return_t vm_map_page_info(
        vm_page_info_flavor_t   flavor,
        vm_page_info_t          info,
        mach_msg_type_number_t  *count);
+extern kern_return_t vm_map_page_range_info_internal(
+       vm_map_t                map,
+       vm_map_offset_t         start_offset,
+       vm_map_offset_t         end_offset,
+       vm_page_info_flavor_t   flavor,
+       vm_page_info_t          info,
+       mach_msg_type_number_t  *count);
 #endif /* XNU_KERNEL_PRIVATE */
 
 
@@ -1038,13 +1437,44 @@ extern kern_return_t vm_map_page_info(
                vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
                                        FALSE, copy_result, TRUE)
 
-#endif /* MACH_KERNEL_PRIVATE */
+
+/*
+ * Internal macros for rounding and truncation of vm_map offsets and sizes
+ */
+#define VM_MAP_ROUND_PAGE(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
+#define VM_MAP_TRUNC_PAGE(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
 
 /*
  * Macros for rounding and truncation of vm_map offsets and sizes
  */
-#define vm_map_round_page(x) (((vm_map_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
-#define vm_map_trunc_page(x) ((vm_map_offset_t)(x) & ~((signed)PAGE_MASK))     
+#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
+#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
+#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
+#define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0)
+
+static inline void vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
+{ 
+       switch (prot) {
+               case MAP_MEM_NOOP:              break;
+               case MAP_MEM_IO:                *wimg = VM_WIMG_IO; break;
+               case MAP_MEM_COPYBACK:          *wimg = VM_WIMG_USE_DEFAULT; break;
+               case MAP_MEM_INNERWBACK:        *wimg = VM_WIMG_INNERWBACK; break;
+               case MAP_MEM_POSTED:            *wimg = VM_WIMG_POSTED; break;
+               case MAP_MEM_WTHRU:             *wimg = VM_WIMG_WTHRU; break;
+               case MAP_MEM_WCOMB:             *wimg = VM_WIMG_WCOMB; break;
+               default:
+                       panic("Unrecognized mapping type %u\n", prot);
+       }
+}
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
+#endif /* XNU_KERNEL_PRIVATE */
+
+#define vm_map_round_page(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
+#define vm_map_trunc_page(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
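
Both vm_map_round_page() and vm_map_trunc_page() now take the page mask explicitly because a map can use a page size other than the global PAGE_SIZE (see VM_MAP_PAGE_MASK above). A self-contained sketch with a 16 KB map (page_shift 14, so pgmask == 0x3FFF); the sketch_* names are illustrative:

#include <assert.h>
#include <stdint.h>

typedef uint64_t vm_map_offset_t;	/* stand-in for the kernel type */

#define sketch_round_page(x, pgmask) \
	(((vm_map_offset_t)(x) + (pgmask)) & ~((vm_map_offset_t)(pgmask)))
#define sketch_trunc_page(x, pgmask) \
	((vm_map_offset_t)(x) & ~((vm_map_offset_t)(pgmask)))

int main(void)
{
	vm_map_offset_t pgmask = (1ULL << 14) - 1;	/* 16 KB pages */

	assert(sketch_round_page(0x4001, pgmask) == 0x8000);
	assert(sketch_trunc_page(0x7FFF, pgmask) == 0x4000);
	return 0;
}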
 
 /*
  * Flags for vm_map_remove() and vm_map_delete()
@@ -1055,18 +1485,32 @@ extern kern_return_t vm_map_page_info(
 #define        VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
 #define VM_MAP_REMOVE_SAVE_ENTRIES     0x8
 #define VM_MAP_REMOVE_NO_PMAP_CLEANUP  0x10
+#define VM_MAP_REMOVE_NO_MAP_ALIGN     0x20
+#define VM_MAP_REMOVE_NO_UNNESTING     0x40
+#define VM_MAP_REMOVE_IMMUTABLE                0x80
 
 /* Support for UPLs from vm_maps */
 
+#ifdef XNU_KERNEL_PRIVATE
+
 extern kern_return_t vm_map_get_upl(
                                vm_map_t                target_map,
                                vm_map_offset_t         map_offset,
                                upl_size_t              *size,
                                upl_t                   *upl,
                                upl_page_info_array_t   page_info,
-                               unsigned int    *page_infoCnt,
-                               int             *flags,
-                               int             force_data_sync);
+                               unsigned int            *page_infoCnt,
+                               upl_control_flags_t     *flags,
+                               vm_tag_t                tag,
+                               int                     force_data_sync);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
+extern void
+vm_map_sizes(vm_map_t map,
+               vm_map_size_t * psize,
+               vm_map_size_t * pfree,
+               vm_map_size_t * plargest_free);
 
 #if CONFIG_DYNAMIC_CODE_SIGNING
 extern kern_return_t vm_map_sign(vm_map_t map, 
@@ -1074,14 +1518,21 @@ extern kern_return_t vm_map_sign(vm_map_t map,
                                 vm_map_offset_t end);
 #endif
 
-#if CONFIG_FREEZE
-extern kern_return_t vm_map_freeze_walk(
+extern kern_return_t vm_map_partial_reap(
                vm_map_t map,
-               unsigned int *purgeable_count,
-               unsigned int *wired_count,
-               unsigned int *clean_count,
-               unsigned int *dirty_count,
-               boolean_t *has_shared);
+               unsigned int *reclaimed_resident,
+               unsigned int *reclaimed_compressed);
+
+
+#if DEVELOPMENT || DEBUG
+
+extern int vm_map_disconnect_page_mappings(
+               vm_map_t map,
+               boolean_t);
+#endif
+
+
+#if CONFIG_FREEZE
 
 extern kern_return_t vm_map_freeze(
                vm_map_t map,
@@ -1089,14 +1540,20 @@ extern kern_return_t vm_map_freeze(
                unsigned int *wired_count,
                unsigned int *clean_count,
                unsigned int *dirty_count,
+               unsigned int dirty_budget,
                boolean_t *has_shared);
-                
-extern void vm_map_thaw(
-                vm_map_t map);
 #endif
 
 __END_DECLS
 
+/*
+ * In some cases, we don't have a real VM object but still want to return a
+ * unique ID (to avoid a memory region looking like shared memory), so build
+ * a fake pointer based on the map's ledger and the index of the ledger being
+ * reported.
+ */
+#define INFO_MAKE_FAKE_OBJECT_ID(map,ledger_id)        ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))
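
An illustrative userspace model of that macro: the ID is the permuted address of the ledger_id-th int slot past the ledger pointer, so distinct ledger indices yield distinct IDs without disclosing the raw kernel pointer. Modelling VM_KERNEL_ADDRPERM as adding a boot-time secret is an assumption of this sketch:

#include <stdint.h>

static uintptr_t addrperm_secret = 0x5eed;	/* stand-in for the boot-time secret */

static uint32_t
make_fake_object_id(const void *ledger, int ledger_id)
{
	/* (int *) arithmetic scales ledger_id by sizeof(int), as in the macro */
	uintptr_t p = (uintptr_t)((const int *)ledger + ledger_id);
	return (uint32_t)(p + addrperm_secret);
}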
+
 #endif /* KERNEL_PRIVATE */
  
 #endif /* _VM_VM_MAP_H_ */