diff --git a/osfmk/vm/vm_map.h b/osfmk/vm/vm_map.h
index 6d05060c1c8ac4a90e60bf598e094ffd4e26f9dd..44cef715dae6faaf76c4fa061d21094f14b87aa7 100644
--- a/osfmk/vm/vm_map.h
+++ b/osfmk/vm/vm_map.h
@@ -91,10 +91,12 @@ extern vm_map_t current_map(void);
 
 /* Setup reserved areas in a new VM map */
 extern kern_return_t   vm_map_exec(
-                               vm_map_t                new_map,
-                               task_t                  task,
-                               void                    *fsroot,
-                               cpu_type_t              cpu);
+       vm_map_t                new_map,
+       task_t                  task,
+       boolean_t               is64bit,
+       void                    *fsroot,
+       cpu_type_t              cpu,
+       cpu_subtype_t           cpu_subtype);
 
 __END_DECLS
 
@@ -138,14 +140,17 @@ typedef struct vm_map_entry       *vm_map_entry_t;
  *             memory object or a sub map (of the kernel map).
  */
 typedef union vm_map_object {
-       vm_object_t             vm_object;      /* object object */
-       vm_map_t                sub_map;        /* belongs to another map */
+       vm_object_t             vmo_object;     /* object object */
+       vm_map_t                vmo_submap;     /* belongs to another map */
 } vm_map_object_t;
 
 #define named_entry_lock_init(object)  lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
 #define named_entry_lock_destroy(object)       lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
 #define named_entry_lock(object)               lck_mtx_lock(&(object)->Lock)
 #define named_entry_unlock(object)             lck_mtx_unlock(&(object)->Lock)   
+#if VM_NAMED_ENTRY_LIST
+extern queue_head_t vm_named_entry_list;
+#endif /* VM_NAMED_ENTRY_LIST */
 
 /*
  *     Type:           vm_named_entry_t [internal use only]
@@ -169,7 +174,6 @@ struct vm_named_entry {
        decl_lck_mtx_data(,     Lock)           /* Synchronization */
        union {
                vm_object_t     object;         /* object I point to */
-               memory_object_t pager;          /* amo pager port */
                vm_map_t        map;            /* map backing submap */
                vm_map_copy_t   copy;           /* a VM map copy */
        } backing;
@@ -181,8 +185,14 @@ struct vm_named_entry {
        unsigned int                            /* Is backing.xxx : */
        /* boolean_t */         internal:1,     /* ... an internal object */
        /* boolean_t */         is_sub_map:1,   /* ... a submap? */
-       /* boolean_t */         is_pager:1,     /* ... a pager port */
        /* boolean_t */         is_copy:1;      /* ... a VM map copy */
+#if VM_NAMED_ENTRY_LIST
+       queue_chain_t           named_entry_list;
+       int                     named_entry_alias;
+       mach_port_t             named_entry_port;
+#define NAMED_ENTRY_BT_DEPTH 16
+       void                    *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
+#endif /* VM_NAMED_ENTRY_LIST */
 };
 
 /*
@@ -206,6 +216,84 @@ struct vm_map_links {
        vm_map_offset_t         end;            /* end address */
 };
 
+/*
+ * IMPORTANT:
+ * The "alias" field can be updated while holding the VM map lock
+ * "shared".  It's OK as along as it's the only field that can be
+ * updated without the VM map "exclusive" lock.
+ */
+#define VME_OBJECT(entry) ((entry)->vme_object.vmo_object)
+#define VME_OBJECT_SET(entry, object)                          \
+       MACRO_BEGIN                                             \
+       (entry)->vme_object.vmo_object = (object);              \
+       MACRO_END
+#define VME_SUBMAP(entry) ((entry)->vme_object.vmo_submap)
+#define VME_SUBMAP_SET(entry, submap)                          \
+       MACRO_BEGIN                                             \
+       (entry)->vme_object.vmo_submap = (submap);              \
+       MACRO_END
+#define VME_OFFSET(entry) ((entry)->vme_offset & ~PAGE_MASK)
+#define VME_OFFSET_SET(entry, offset)          \
+       MACRO_BEGIN                             \
+       int __alias;                            \
+       __alias = VME_ALIAS((entry));           \
+       assert((offset & PAGE_MASK) == 0);      \
+       (entry)->vme_offset = offset | __alias; \
+       MACRO_END
+#define VME_OBJECT_SHADOW(entry, length)                       \
+       MACRO_BEGIN                                             \
+       vm_object_t             __object;                       \
+       vm_object_offset_t      __offset;                       \
+       __object = VME_OBJECT((entry));                         \
+       __offset = VME_OFFSET((entry));                         \
+       vm_object_shadow(&__object, &__offset, (length));       \
+       if (__object != VME_OBJECT((entry))) {                  \
+               VME_OBJECT_SET((entry), __object);              \
+               (entry)->use_pmap = TRUE;                       \
+       }                                                       \
+       if (__offset != VME_OFFSET((entry))) {                  \
+               VME_OFFSET_SET((entry), __offset);              \
+       }                                                       \
+       MACRO_END
+
+#define VME_ALIAS_MASK (PAGE_MASK)
+#define VME_ALIAS(entry) ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))
+#define VME_ALIAS_SET(entry, alias) \
+       MACRO_BEGIN                                                     \
+       vm_map_offset_t __offset;                                       \
+       __offset = VME_OFFSET((entry));                                 \
+       (entry)->vme_offset = __offset | ((alias) & VME_ALIAS_MASK);    \
+       MACRO_END
+
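
For illustration, a minimal userland sketch of the packing scheme these macros rely on: a page-aligned offset and a small alias can share one 64-bit field because the offset's low bits are always zero. The sk_* names and the 4K page size are assumptions for the example, not part of vm_map.h.

        #include <assert.h>
        #include <stdint.h>

        #define SK_PAGE_MASK 0xFFFULL   /* assumes 4K pages, standing in for PAGE_MASK */

        /* Pack a page-aligned offset and an alias into one field. */
        static inline uint64_t
        sk_pack_offset_alias(uint64_t offset, unsigned int alias)
        {
                assert((offset & SK_PAGE_MASK) == 0);    /* offset must be page-aligned */
                return offset | (alias & SK_PAGE_MASK);  /* alias rides in the low bits */
        }

        /* Recover each half, mirroring VME_OFFSET() and VME_ALIAS(). */
        #define SK_OFFSET(v) ((v) & ~SK_PAGE_MASK)
        #define SK_ALIAS(v)  ((unsigned int)((v) & SK_PAGE_MASK))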
+/*
+ * FOOTPRINT ACCOUNTING:
+ * The "memory footprint" is better described in the pmap layer.
+ *
+ * At the VM level, these 2 vm_map_entry_t fields are relevant:
+ * iokit_mapped:
+ *     For an "iokit_mapped" entry, we add the size of the entry to the
+ *     footprint when the entry is entered into the map and we subtract that
+ *     size when the entry is removed.  No other accounting should take place.
+ *     "use_pmap" should be FALSE but is not taken into account.
+ * use_pmap: (only when is_sub_map is FALSE)
+ *     This indicates if we should ask the pmap layer to account for pages
+ *     in this mapping.  If FALSE, we expect that another form of accounting
+ *     is being used (e.g. "iokit_mapped" or the explicit accounting of
+ *     non-volatile purgable memory).
+ *
+ * So the logic is mostly:
+ * if entry->is_sub_map == TRUE
+ *     anything in a submap does not count for the footprint
+ * else if entry->iokit_mapped == TRUE
+ *     footprint includes the entire virtual size of this entry
+ * else if entry->use_pmap == FALSE
+ *     tell pmap NOT to account for pages being pmap_enter()'d from this
+ *     mapping (i.e. use "alternate accounting")
+ * else
+ *     pmap will account for pages being pmap_enter()'d from this mapping
+ *     as it sees fit (only if anonymous, etc...)
+ */
+
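
The decision tree above can be restated as one small predicate. This is a hypothetical restatement for clarity, not an xnu function; it uses the vm_map_entry bitfields declared below (note the comment says "iokit_mapped" while the struct field is named iokit_acct).

        /* Hypothetical: does this entry contribute to the memory footprint? */
        static boolean_t
        entry_counts_in_footprint_sketch(vm_map_entry_t entry)
        {
                if (entry->is_sub_map)
                        return FALSE;   /* submaps never count */
                if (entry->iokit_acct)
                        return TRUE;    /* entire virtual size counts */
                if (!entry->use_pmap)
                        return FALSE;   /* "alternate accounting" covers these pages */
                return TRUE;            /* pmap accounts for pmap_enter()'d pages */
        }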
 struct vm_map_entry {
        struct vm_map_links     links;          /* links to other entries */
 #define vme_prev               links.prev
@@ -214,46 +302,49 @@ struct vm_map_entry {
 #define vme_end                        links.end
 
        struct vm_map_store     store;
-       union vm_map_object     object;         /* object I point to */
-       vm_object_offset_t      offset;         /* offset into object */
+       union vm_map_object     vme_object;     /* object I point to */
+       vm_object_offset_t      vme_offset;     /* offset into object */
+
        unsigned int
-       /* boolean_t */         is_shared:1,    /* region is shared */
-       /* boolean_t */         is_sub_map:1,   /* Is "object" a submap? */
-       /* boolean_t */         in_transition:1, /* Entry being changed */
-       /* boolean_t */         needs_wakeup:1,  /* Waiters on in_transition */
-       /* vm_behavior_t */     behavior:2,     /* user paging behavior hint */
+       /* boolean_t */ is_shared:1,    /* region is shared */
+       /* boolean_t */ is_sub_map:1,   /* Is "object" a submap? */
+       /* boolean_t */ in_transition:1, /* Entry being changed */
+       /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */
+       /* vm_behavior_t */ behavior:2, /* user paging behavior hint */
                /* behavior is not defined for submap type */
-       /* boolean_t */         needs_copy:1,   /* object need to be copied? */
+       /* boolean_t */ needs_copy:1,   /* object need to be copied? */
+
                /* Only in task maps: */
-       /* vm_prot_t */         protection:3,   /* protection code */
-       /* vm_prot_t */         max_protection:3,/* maximum protection */
-       /* vm_inherit_t */      inheritance:2,  /* inheritance */
-       /* boolean_t */         use_pmap:1,     /*
-                                                * use_pmap is overloaded:
-                                                * if "is_sub_map":
-                                                *      use a nested pmap?
-                                                * else (i.e. if object):
-                                                *      use pmap accounting
-                                                *      for footprint?
-                                                */
-       /*
-        * IMPORTANT:
-        * The "alias" field can be updated while holding the VM map lock
-        * "shared".  It's OK as along as it's the only field that can be
-        * updated without the VM map "exclusive" lock.
-        */
-       /* unsigned char */     alias:8,        /* user alias */
-       /* boolean_t */         no_cache:1,     /* should new pages be cached? */
-       /* boolean_t */         permanent:1,    /* mapping can not be removed */
-       /* boolean_t */         superpage_size:1,/* use superpages of a certain size */
-       /* boolean_t */         map_aligned:1,  /* align to map's page size */
-       /* boolean_t */         zero_wired_pages:1, /* zero out the wired pages of this entry it is being deleted without unwiring them */
-       /* boolean_t */         used_for_jit:1,
-       /* boolean_t */ from_reserved_zone:1,   /* Allocated from
-                                                * kernel reserved zone  */
+       /* vm_prot_t */ protection:3,   /* protection code */
+       /* vm_prot_t */ max_protection:3, /* maximum protection */
+       /* vm_inherit_t */ inheritance:2, /* inheritance */
+       /* boolean_t */ use_pmap:1,     /*
+                                        * use_pmap is overloaded:
+                                        * if "is_sub_map":
+                                        *      use a nested pmap?
+                                        * else (i.e. if object):
+                                        *      use pmap accounting
+                                        *      for footprint?
+                                        */
+       /* boolean_t */ no_cache:1,     /* should new pages be cached? */
+       /* boolean_t */ permanent:1,    /* mapping can not be removed */
+       /* boolean_t */ superpage_size:1, /* use superpages of a certain size */
+       /* boolean_t */ map_aligned:1,  /* align to map's page size */
+       /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of
+                                            * this entry if it is being deleted
+                                            * without unwiring them */
+       /* boolean_t */ used_for_jit:1,
+       /* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */
+       /* boolean_t */ from_reserved_zone:1, /* Allocated from
+                                              * kernel reserved zone    */
 
        /* iokit accounting: use the virtual size rather than resident size: */
-       /* boolean_t */ iokit_acct:1;
+       /* boolean_t */ iokit_acct:1,
+       /* boolean_t */ vme_resilient_codesign:1,
+       /* boolean_t */ vme_resilient_media:1,
+       /* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */
+               __unused:4;
 
        unsigned short          wired_count;    /* can be paged if = 0 */
        unsigned short          user_wired_count; /* for vm_wire */
@@ -300,7 +391,6 @@ struct vm_map_header {
        int                     nentries;       /* Number of entries */
        boolean_t               entries_pageable;
                                                /* are map entries pageable? */
-       vm_map_offset_t         highest_entry_end_addr; /* The ending address of the highest allocated vm_entry_t */
 #ifdef VM_MAP_STORE_USE_RB
        struct rb_head  rb_head_store;
 #endif
@@ -327,24 +417,58 @@ struct vm_map_header {
  *             quickly find free space.
  */
 struct _vm_map {
-       lck_rw_t                        lock;           /* map lock */
+       lck_rw_t                lock;           /* map lock */
        struct vm_map_header    hdr;            /* Map entry header */
 #define min_offset             hdr.links.start /* start of range */
 #define max_offset             hdr.links.end   /* end of range */
-#define highest_entry_end      hdr.highest_entry_end_addr
        pmap_t                  pmap;           /* Physical map */
        vm_map_size_t           size;           /* virtual size */
        vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
        vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
-       int                     ref_count;      /* Reference count */
+#if __x86_64__
+       vm_map_offset_t         vmmap_high_start;
+#endif /* __x86_64__ */
+
+       union {
+               /*
+                * If map->disable_vmentry_reuse == TRUE:
+                * the end address of the highest allocated vm_map_entry_t.
+                */
+               vm_map_offset_t         vmu1_highest_entry_end;
+               /*
+                * For a nested VM map:
+                * the lowest address in this nested VM map that we would
+                * expect to be unnested under normal operation (i.e. for
+                * regular copy-on-write on DATA section).
+                */
+               vm_map_offset_t         vmu1_lowest_unnestable_start;
+       } vmu1;
+#define highest_entry_end      vmu1.vmu1_highest_entry_end
+#define lowest_unnestable_start        vmu1.vmu1_lowest_unnestable_start
+       decl_lck_mtx_data(,     s_lock)         /* Lock ref, res fields */
+       lck_mtx_ext_t           s_lock_ext;
+       vm_map_entry_t          hint;           /* hint for quick lookups */
+       union {
+               struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
+               struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
+       } vmmap_u_1;
+#define hole_hint vmmap_u_1.vmmap_hole_hint
+#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
+       union{
+               vm_map_entry_t          _first_free;    /* First free space hint */
+               struct vm_map_links*    _holes;         /* links all holes between entries */
+       } f_s;                                          /* Union for free space data structures being used */
+
+#define first_free             f_s._first_free
+#define holes_list             f_s._holes
+
+       int                     map_refcnt;     /* Reference count */
+
 #if    TASK_SWAPPER
        int                     res_count;      /* Residence count (swap) */
        int                     sw_state;       /* Swap state */
 #endif /* TASK_SWAPPER */
-       decl_lck_mtx_data(,     s_lock)         /* Lock ref, res fields */
-       lck_mtx_ext_t           s_lock_ext;
-       vm_map_entry_t          hint;           /* hint for quick lookups */
-       vm_map_entry_t          first_free;     /* First free space hint */
+
        unsigned int            
        /* boolean_t */         wait_for_space:1, /* Should callers wait for space? */
        /* boolean_t */         wiring_required:1, /* All memory wired? */
@@ -353,16 +477,18 @@ struct _vm_map {
        /* boolean_t */         switch_protect:1, /*  Protect map from write faults while switched */
        /* boolean_t */         disable_vmentry_reuse:1, /*  All vm entries should keep using newer and higher addresses in the map */ 
        /* boolean_t */         map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
-       /* reserved */          pad:25;
+       /* boolean_t */         holelistenabled:1,
+       /* boolean_t */         is_nested_map:1,
+       /* boolean_t */         map_disallow_new_exec:1, /* Disallow new executable code */
+       /* boolean_t */         jit_entry_exists:1,
+       /* boolean_t */         has_corpse_footprint:1,
+       /* boolean_t */         warned_delete_gap:1,
+       /* reserved */          pad:19;
        unsigned int            timestamp;      /* Version number */
-       unsigned int            color_rr;       /* next color (not protected by a lock) */
-#if CONFIG_FREEZE
-       void                    *default_freezer_handle;
-#endif
-       boolean_t               jit_entry_exists;
-} ;
+};
 
-#define vm_map_to_entry(map)   ((struct vm_map_entry *) &(map)->hdr.links)
+#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
+#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
 #define vm_map_first_entry(map)        ((map)->hdr.links.next)
 #define vm_map_last_entry(map) ((map)->hdr.links.prev)
 
@@ -420,7 +546,11 @@ typedef struct vm_map_version {
  *
  *             The third format is a kernel buffer copy object - for data
  *             small enough that physical copies were the most efficient
- *             method.
+ *             method. This method uses a zero-sized array unioned with
+ *             other format-specific data in the 'c_u' member. This unsized
+ *             array overlaps the other elements and allows us to use this
+ *             extra structure space for physical memory copies. On 64-bit
+ *             systems this saves ~64 bytes per vm_map_copy.
  */
 
 struct vm_map_copy {
@@ -431,12 +561,9 @@ struct vm_map_copy {
        vm_object_offset_t      offset;
        vm_map_size_t           size;
        union {
-           struct vm_map_header        hdr;    /* ENTRY_LIST */
-           vm_object_t                 object; /* OBJECT */
-           struct {                            
-               void                    *kdata;       /* KERNEL_BUFFER */
-               vm_size_t               kalloc_size;  /* size of this copy_t */
-           } c_k;
+               struct vm_map_header    hdr;      /* ENTRY_LIST */
+               vm_object_t             object;   /* OBJECT */
+               uint8_t                 kdata[0]; /* KERNEL_BUFFER */
        } c_u;
 };
 
@@ -444,9 +571,8 @@ struct vm_map_copy {
 #define cpy_hdr                        c_u.hdr
 
 #define cpy_object             c_u.object
-
-#define cpy_kdata              c_u.c_k.kdata
-#define cpy_kalloc_size                c_u.c_k.kalloc_size
+#define cpy_kdata              c_u.kdata
+#define cpy_kdata_hdr_sz       (offsetof(struct vm_map_copy, c_u.kdata))
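
With the zero-length kdata[] array, a KERNEL_BUFFER copy is carved from a single allocation: the header first, the inline data starting at cpy_kdata. A minimal sketch of that allocation pattern, assuming an in-kernel context (the helper name is hypothetical; the real logic lives in vm_map.c):

        /* Hypothetical: build a KERNEL_BUFFER copy object with inline data. */
        static vm_map_copy_t
        copy_kbuf_alloc_sketch(const void *src, vm_map_size_t len)
        {
                vm_map_copy_t copy;

                /* one allocation holds both the header and the data */
                copy = (vm_map_copy_t)kalloc(cpy_kdata_hdr_sz + len);
                if (copy == VM_MAP_COPY_NULL)
                        return VM_MAP_COPY_NULL;
                copy->type = VM_MAP_COPY_KERNEL_BUFFER;
                copy->size = len;
                memcpy(copy->cpy_kdata, src, len);  /* data sits right after the header */
                return copy;
        }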
 
 #define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
 #define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
@@ -456,8 +582,7 @@ struct vm_map_copy {
  *     Useful macros for entry list copy objects
  */
 
-#define vm_map_copy_to_entry(copy)             \
-               ((struct vm_map_entry *) &(copy)->cpy_hdr.links)
+#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
 #define vm_map_copy_first_entry(copy)          \
                ((copy)->cpy_hdr.links.next)
 #define vm_map_copy_last_entry(copy)           \
@@ -487,6 +612,25 @@ struct vm_map_copy {
  */
 #define vm_map_lock_read_to_write(map) (lck_rw_lock_shared_to_exclusive(&(map)->lock) != TRUE)
 
+#define vm_map_try_lock(map)           lck_rw_try_lock_exclusive(&(map)->lock)
+#define vm_map_try_lock_read(map)      lck_rw_try_lock_shared(&(map)->lock)
+
+#if MACH_ASSERT || DEBUG
+#define vm_map_lock_assert_held(map) \
+       lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
+#define vm_map_lock_assert_shared(map) \
+       lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
+#define vm_map_lock_assert_exclusive(map) \
+       lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
+#define vm_map_lock_assert_notheld(map) \
+       lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
+#else  /* MACH_ASSERT || DEBUG */ 
+#define vm_map_lock_assert_held(map)
+#define vm_map_lock_assert_shared(map)
+#define vm_map_lock_assert_exclusive(map)
+#define vm_map_lock_assert_notheld(map)
+#endif /* MACH_ASSERT || DEBUG */
+
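
A usage sketch for the new try-lock and assertion macros, assuming an in-kernel caller (the function is hypothetical):

        /* Hypothetical: opportunistic update that refuses to block. */
        static boolean_t
        try_update_map_sketch(vm_map_t map)
        {
                if (!vm_map_try_lock(map))       /* bail out if contended */
                        return FALSE;
                vm_map_lock_assert_exclusive(map);
                /* ... mutate map entries under the exclusive lock ... */
                vm_map_unlock(map);
                return TRUE;
        }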
 /*
  *     Exported procedures that operate on vm_map_t.
  */
@@ -504,6 +648,8 @@ extern kern_return_t vm_map_find_space(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                vm_map_entry_t          *o_entry);      /* OUT */
 
 extern void vm_map_clip_start(
@@ -571,7 +717,9 @@ extern vm_map_entry_t       vm_map_entry_insert(
                                boolean_t               permanent,
                                unsigned int            superpage_size,
                                boolean_t               clear_map_aligned,
-                               boolean_t               is_submap);
+                               boolean_t               is_submap,
+                               boolean_t               used_for_jit,
+                               int                     alias);
 
 
 /*
@@ -588,11 +736,6 @@ extern vm_map_entry_t      vm_map_entry_insert(
                                                /* Physical map associated
                                                 * with this address map */
 
-#define                vm_map_verify_done(map, version)    vm_map_unlock_read(map)
-                                               /* Operation that required
-                                                * a verified lookup is
-                                                * now complete */
-
 /*
  * Macros/functions for map residence counts and swapin/out of vm maps
  */
@@ -620,7 +763,7 @@ MACRO_BEGIN                                 \
        if (Map) {                              \
                lck_mtx_lock(&Map->s_lock);     \
                Map->res_count++;               \
-               Map->ref_count++;               \
+               Map->map_refcnt++;              \
                lck_mtx_unlock(&Map->s_lock);   \
        }                                       \
 MACRO_END
@@ -655,7 +798,7 @@ MACRO_END
 MACRO_BEGIN                            \
        vm_map_t Map = (map);           \
        lck_mtx_lock(&Map->s_lock);     \
-       ++Map->ref_count;               \
+       ++Map->map_refcnt;              \
        vm_map_res_reference(Map);      \
        lck_mtx_unlock(&Map->s_lock);   \
 MACRO_END
@@ -674,7 +817,7 @@ MACRO_BEGIN                                 \
        vm_map_t Map = (map);                   \
        if (Map) {                              \
                lck_mtx_lock(&Map->s_lock);     \
-               Map->ref_count++;               \
+               Map->map_refcnt++;              \
                lck_mtx_unlock(&Map->s_lock);   \
        }                                       \
 MACRO_END
@@ -714,7 +857,7 @@ extern vm_object_t  vm_submap_object;
 
 #define        vm_map_dealloc_fast(map)                \
        MACRO_BEGIN                                     \
-       register int c;                         \
+       int c;                                          \
                                                        \
        lck_mtx_lock(&map->s_lock);                     \
        c = --map->ref_count;                   \
@@ -757,6 +900,8 @@ extern kern_return_t        vm_map_enter(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                vm_object_t             object,
                                vm_object_offset_t      offset,
                                boolean_t               needs_copy,
@@ -764,6 +909,23 @@ extern kern_return_t       vm_map_enter(
                                vm_prot_t               max_protection,
                                vm_inherit_t            inheritance);
 
+#if __arm64__
+extern kern_return_t   vm_map_enter_fourk(
+                               vm_map_t                map,
+                               vm_map_offset_t         *address,
+                               vm_map_size_t           size,
+                               vm_map_offset_t         mask,
+                               int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
+                               vm_object_t             object,
+                               vm_object_offset_t      offset,
+                               boolean_t               needs_copy,
+                               vm_prot_t               cur_protection,
+                               vm_prot_t               max_protection,
+                               vm_inherit_t            inheritance);
+#endif /* __arm64__ */
+
 /* XXX should go away - replaced with regular enter of contig object */
 extern  kern_return_t  vm_map_enter_cpm(
                                vm_map_t                map,
@@ -777,6 +939,8 @@ extern kern_return_t vm_map_remap(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                vm_map_t                src_map,
                                vm_map_offset_t         memory_address,
                                boolean_t               copy,
@@ -803,7 +967,11 @@ extern     kern_return_t   vm_map_read_user(
 /* Create a new task map using an existing task map as a template. */
 extern vm_map_t                vm_map_fork(
                                ledger_t                ledger,
-                               vm_map_t                old_map);
+                               vm_map_t                old_map,
+                               int                     options);
+#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE      0x00000001
+#define VM_MAP_FORK_PRESERVE_PURGEABLE         0x00000002
+#define VM_MAP_FORK_CORPSE_FOOTPRINT           0x00000004
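
The options argument is a bitmask of the flags above. A sketch of a corpse-style fork, assuming ledger and old_map are in scope:

        /* Hypothetical: fork a map for a corpse, preserving a footprint record. */
        new_map = vm_map_fork(ledger, old_map,
                              VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
                              VM_MAP_FORK_CORPSE_FOOTPRINT);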
 
 /* Change inheritance */
 extern kern_return_t   vm_map_inherit(
@@ -833,12 +1001,6 @@ extern kern_return_t      vm_map_behavior_set(
                                vm_map_offset_t         end,
                                vm_behavior_t           new_behavior);
 
-extern kern_return_t vm_map_purgable_control(
-                               vm_map_t                map,
-                               vm_map_offset_t         address,
-                               vm_purgable_t           control,
-                               int                     *state);
-
 extern kern_return_t vm_map_region(
                                vm_map_t                 map,
                                vm_map_offset_t         *address,
@@ -866,7 +1028,9 @@ extern kern_return_t vm_map_query_volatile(
        vm_map_t        map,
        mach_vm_size_t  *volatile_virtual_size_p,
        mach_vm_size_t  *volatile_resident_size_p,
-       mach_vm_size_t  *volatile_pmap_size_p);
+       mach_vm_size_t  *volatile_compressed_size_p,
+       mach_vm_size_t  *volatile_pmap_size_p,
+       mach_vm_size_t  *volatile_compressed_pmap_size_p);
 
 extern kern_return_t   vm_map_submap(
                                vm_map_t                map,
@@ -904,7 +1068,65 @@ extern kern_return_t vm_map_set_cache_attr(
 
 extern int override_nx(vm_map_t map, uint32_t user_tag);
 
-extern int vm_map_purge(vm_map_t map);
+#if PMAP_CS
+extern kern_return_t vm_map_entry_cs_associate(
+       vm_map_t                map,
+       vm_map_entry_t          entry,
+       vm_map_kernel_flags_t   vmk_flags);
+#endif /* PMAP_CS */
+
+extern void vm_map_region_top_walk(
+       vm_map_entry_t entry,
+       vm_region_top_info_t top);
+extern void vm_map_region_walk(
+       vm_map_t map,
+       vm_map_offset_t va,
+       vm_map_entry_t entry,
+       vm_object_offset_t offset,
+       vm_object_size_t range,
+       vm_region_extended_info_t extended,
+       boolean_t look_for_pages,
+       mach_msg_type_number_t count);
+
+
+struct vm_map_corpse_footprint_header {
+       vm_size_t       cf_size;        /* allocated buffer size */
+       uint32_t        cf_last_region; /* offset of last region in buffer */
+       union {
+               uint32_t cfu_last_zeroes; /* during creation:
+                                         * number of "zero" dispositions at
+                                         * end of last region */
+               uint32_t cfu_hint_region; /* during lookup:
+                                         * offset of last looked up region */
+#define cf_last_zeroes cfu.cfu_last_zeroes
+#define cf_hint_region cfu.cfu_hint_region
+       } cfu;
+};
+struct vm_map_corpse_footprint_region {
+       vm_map_offset_t cfr_vaddr;      /* region start virtual address */
+       uint32_t        cfr_num_pages;  /* number of pages in this "region" */
+       unsigned char   cfr_disposition[0];     /* disposition of each page */
+} __attribute__((packed));
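
Each region record is variable-length: one disposition byte per page follows the fixed header, so walking the buffer requires computing each record's size. A hypothetical helper:

        /* Hypothetical: byte size of one variable-length region record. */
        static vm_size_t
        cfr_size_sketch(const struct vm_map_corpse_footprint_region *cfr)
        {
                return sizeof(*cfr) +
                       cfr->cfr_num_pages * sizeof(cfr->cfr_disposition[0]);
        }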
+
+extern kern_return_t vm_map_corpse_footprint_collect(
+       vm_map_t        old_map,
+       vm_map_entry_t  old_entry,
+       vm_map_t        new_map);
+extern void vm_map_corpse_footprint_collect_done(
+       vm_map_t        new_map);
+
+extern kern_return_t vm_map_corpse_footprint_query_page_info(
+       vm_map_t        map,
+       vm_map_offset_t va,
+       int             *disp);
+
+extern void vm_map_copy_footprint_ledgers(
+       task_t  old_task,
+       task_t  new_task);
+extern void vm_map_copy_ledger(
+       task_t  old_task,
+       task_t  new_task,
+       int     ledger_entry);
 
 #endif /* MACH_KERNEL_PRIVATE */
 
@@ -916,6 +1138,17 @@ extern vm_map_t           vm_map_create(
                                vm_map_offset_t         min_off,
                                vm_map_offset_t         max_off,
                                boolean_t               pageable);
+extern vm_map_t vm_map_create_options(
+       pmap_t                  pmap,
+       vm_map_offset_t         min_off,
+       vm_map_offset_t         max_off,
+       int                     options);
+#define VM_MAP_CREATE_PAGEABLE         0x00000001
+#define VM_MAP_CREATE_CORPSE_FOOTPRINT 0x00000002
+#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
+                                  VM_MAP_CREATE_CORPSE_FOOTPRINT)
+
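
vm_map_create_options() generalizes vm_map_create(): the pageable boolean becomes VM_MAP_CREATE_PAGEABLE and new behaviors get their own bits. A sketch, assuming pmap, min_off and max_off are in scope:

        /* Hypothetical: pageable map that can collect a corpse footprint. */
        map = vm_map_create_options(pmap, min_off, max_off,
                                    VM_MAP_CREATE_PAGEABLE |
                                    VM_MAP_CREATE_CORPSE_FOOTPRINT);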
+extern void            vm_map_disable_hole_optimization(vm_map_t map);
 
 /* Get rid of a map */
 extern void            vm_map_destroy(
@@ -945,6 +1178,43 @@ extern boolean_t vm_map_check_protection(
                                vm_prot_t               protection);
 
 /* wire down a region */
+
+#ifdef XNU_KERNEL_PRIVATE
+
+extern kern_return_t   vm_map_wire_kernel(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_map_offset_t         end,
+                               vm_prot_t               access_type,
+                               vm_tag_t                tag,
+                               boolean_t               user_wire);
+
+extern kern_return_t   vm_map_wire_and_extract_kernel(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_prot_t               access_type,
+                               vm_tag_t                tag,
+                               boolean_t               user_wire,
+                               ppnum_t                 *physpage_p);
+
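
The kernel-private variants thread a vm_tag_t through so wired memory is attributed to a subsystem. A sketch, assuming start and end are in scope (the tag choice is only an example):

        /* Hypothetical: wire a kernel range, attributed to the OSFMK tag. */
        kr = vm_map_wire_kernel(kernel_map, start, end,
                                VM_PROT_READ | VM_PROT_WRITE,
                                VM_KERN_MEMORY_OSFMK,
                                FALSE);   /* user_wire */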
+/* kext exported versions */
+
+extern kern_return_t   vm_map_wire_external(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_map_offset_t         end,
+                               vm_prot_t               access_type,
+                               boolean_t               user_wire);
+
+extern kern_return_t   vm_map_wire_and_extract_external(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_prot_t               access_type,
+                               boolean_t               user_wire,
+                               ppnum_t                 *physpage_p);
+
+#else /* XNU_KERNEL_PRIVATE */
+
 extern kern_return_t   vm_map_wire(
                                vm_map_t                map,
                                vm_map_offset_t         start,
@@ -959,6 +1229,8 @@ extern kern_return_t       vm_map_wire_and_extract(
                                boolean_t               user_wire,
                                ppnum_t                 *physpage_p);
 
+#endif /* !XNU_KERNEL_PRIVATE */
+
 /* unwire a region */
 extern kern_return_t   vm_map_unwire(
                                vm_map_t                map,
@@ -966,6 +1238,8 @@ extern kern_return_t       vm_map_unwire(
                                vm_map_offset_t         end,
                                boolean_t               user_wire);
 
+#ifdef XNU_KERNEL_PRIVATE
+
 /* Enter a mapping of a memory object */
 extern kern_return_t   vm_map_enter_mem_object(
                                vm_map_t                map,
@@ -973,6 +1247,8 @@ extern kern_return_t       vm_map_enter_mem_object(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                ipc_port_t              port,
                                vm_object_offset_t      offset,
                                boolean_t               needs_copy,
@@ -987,6 +1263,8 @@ extern kern_return_t       vm_map_enter_mem_object_prefault(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                ipc_port_t              port,
                                vm_object_offset_t      offset,
                                vm_prot_t               cur_protection,
@@ -1001,6 +1279,8 @@ extern kern_return_t      vm_map_enter_mem_object_control(
                                vm_map_size_t           size,
                                vm_map_offset_t         mask,
                                int                     flags,
+                               vm_map_kernel_flags_t   vmk_flags,
+                               vm_tag_t                tag,
                                memory_object_control_t control,
                                vm_object_offset_t      offset,
                                boolean_t               needs_copy,
@@ -1008,6 +1288,8 @@ extern kern_return_t      vm_map_enter_mem_object_control(
                                vm_prot_t               max_protection,
                                vm_inherit_t            inheritance);
 
+#endif /* !XNU_KERNEL_PRIVATE */
+
 /* Deallocate a region */
 extern kern_return_t   vm_map_remove(
                                vm_map_t                map,
@@ -1015,6 +1297,13 @@ extern kern_return_t     vm_map_remove(
                                vm_map_offset_t         end,
                                boolean_t               flags);
 
+/* Deallocate a region when the map is already locked */
+extern kern_return_t   vm_map_remove_locked(
+                               vm_map_t                map,
+                               vm_map_offset_t         start,
+                               vm_map_offset_t         end,
+                               boolean_t               flags);
+
 /* Discard a copy without using it */
 extern void            vm_map_copy_discard(
                                vm_map_copy_t           copy);
@@ -1026,16 +1315,29 @@ extern kern_return_t    vm_map_copy_overwrite(
                                vm_map_copy_t           copy,
                                boolean_t               interruptible);
 
+/* returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
+extern boolean_t       vm_map_copy_validate_size(
+                               vm_map_t                dst_map,
+                               vm_map_copy_t           copy,
+                               vm_map_size_t           *size);
+
 /* Place a copy into a map */
 extern kern_return_t   vm_map_copyout(
                                vm_map_t                dst_map,
                                vm_map_address_t        *dst_addr,      /* OUT */
                                vm_map_copy_t           copy);
 
+extern kern_return_t vm_map_copyout_size(
+                               vm_map_t                dst_map,
+                               vm_map_address_t        *dst_addr,      /* OUT */
+                               vm_map_copy_t           copy,
+                               vm_map_size_t           copy_size);
+
 extern kern_return_t   vm_map_copyout_internal(
        vm_map_t                dst_map,
        vm_map_address_t        *dst_addr,      /* OUT */
        vm_map_copy_t           copy,
+       vm_map_size_t           copy_size,
        boolean_t               consume_on_success,
        vm_prot_t               cur_protection,
        vm_prot_t               max_protection,
@@ -1057,6 +1359,18 @@ extern kern_return_t     vm_map_copyin_common(
                                vm_map_copy_t           *copy_result,   /* OUT */
                                boolean_t               use_maxprot);
 
+#define VM_MAP_COPYIN_SRC_DESTROY      0x00000001
+#define VM_MAP_COPYIN_USE_MAXPROT      0x00000002
+#define VM_MAP_COPYIN_ENTRY_LIST       0x00000004
+#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
+#define VM_MAP_COPYIN_ALL_FLAGS                0x0000000F
+extern kern_return_t   vm_map_copyin_internal(
+                               vm_map_t                src_map,
+                               vm_map_address_t        src_addr,
+                               vm_map_size_t           len,
+                               int                     flags,
+                               vm_map_copy_t           *copy_result); /* OUT */
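
These bits subsume the booleans taken by vm_map_copyin_common(): src_destroy and use_maxprot map onto the first two flags. A sketch, assuming the usual locals are in scope:

        /* Hypothetical: copyin with src_destroy == TRUE and use_maxprot == TRUE. */
        kr = vm_map_copyin_internal(src_map, src_addr, len,
                                    VM_MAP_COPYIN_SRC_DESTROY |
                                    VM_MAP_COPYIN_USE_MAXPROT,
                                    &copy);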
+
 extern kern_return_t   vm_map_copy_extract(
        vm_map_t                src_map,
        vm_map_address_t        src_addr,
@@ -1078,12 +1392,23 @@ extern void             vm_map_set_64bit(
 extern void            vm_map_set_32bit(
                                vm_map_t                map);
 
+extern void            vm_map_set_jumbo(
+                               vm_map_t                map);
+
+extern void            vm_map_set_max_addr(
+                               vm_map_t                map, vm_map_offset_t new_max_offset);
+
 extern boolean_t       vm_map_has_hard_pagezero(
                                vm_map_t                map,
                                vm_map_offset_t         pagezero_size);
+extern void            vm_commit_pagezero_status(vm_map_t      tmap);
 
+#ifdef __arm__
+static inline boolean_t vm_map_is_64bit(__unused vm_map_t map) { return 0; }
+#else
 extern boolean_t       vm_map_is_64bit(
                                vm_map_t                map);
+#endif
 
 
 extern kern_return_t   vm_map_raise_max_offset(
@@ -1093,19 +1418,32 @@ extern kern_return_t    vm_map_raise_max_offset(
 extern kern_return_t   vm_map_raise_min_offset(
        vm_map_t        map,
        vm_map_offset_t new_min_offset);
+#if __x86_64__
+extern void vm_map_set_high_start(
+       vm_map_t        map,
+       vm_map_offset_t high_start);
+#endif /* __x86_64__ */
 
 extern vm_map_offset_t vm_compute_max_offset(
-                               unsigned                is64);
+                               boolean_t               is64);
+
+extern void            vm_map_get_max_aslr_slide_section(
+                               vm_map_t                map,
+                               int64_t                 *max_sections,
+                               int64_t                 *section_size);
 
 extern uint64_t        vm_map_get_max_aslr_slide_pages(
                                vm_map_t map);
-       
+
+extern uint64_t        vm_map_get_max_loader_aslr_slide_pages(
+                               vm_map_t map);
+
 extern void            vm_map_set_user_wire_limit(
                                vm_map_t                map,
                                vm_size_t               limit);
 
 extern void vm_map_switch_protect(
-                               vm_map_t                map, 
+                               vm_map_t                map,
                                boolean_t               val);
 
 extern void vm_map_iokit_mapped_region(
@@ -1122,7 +1460,7 @@ extern boolean_t first_free_is_valid(vm_map_t);
 extern int             vm_map_page_shift(
                                vm_map_t                map);
 
-extern int             vm_map_page_mask(
+extern vm_map_offset_t vm_map_page_mask(
                                vm_map_t                map);
 
 extern int             vm_map_page_size(
@@ -1136,6 +1474,10 @@ extern vm_map_offset_t   vm_map_trunc_page_mask(
                                vm_map_offset_t         offset,
                                vm_map_offset_t         mask);
 
+extern boolean_t       vm_map_page_aligned(
+                               vm_map_offset_t         offset,
+                               vm_map_offset_t         mask);
+
 #ifdef XNU_KERNEL_PRIVATE
 extern kern_return_t vm_map_page_info(
        vm_map_t                map,
@@ -1143,6 +1485,13 @@ extern kern_return_t vm_map_page_info(
        vm_page_info_flavor_t   flavor,
        vm_page_info_t          info,
        mach_msg_type_number_t  *count);
+extern kern_return_t vm_map_page_range_info_internal(
+       vm_map_t                map,
+       vm_map_offset_t         start_offset,
+       vm_map_offset_t         end_offset,
+       vm_page_info_flavor_t   flavor,
+       vm_page_info_t          info,
+       mach_msg_type_number_t  *count);
 #endif /* XNU_KERNEL_PRIVATE */
 
 
@@ -1181,6 +1530,21 @@ extern kern_return_t vm_map_page_info(
 #define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
 #define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0)
 
+static inline void vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
+{ 
+       switch (prot) {
+               case MAP_MEM_NOOP:              break;
+               case MAP_MEM_IO:                *wimg = VM_WIMG_IO; break;
+               case MAP_MEM_COPYBACK:          *wimg = VM_WIMG_USE_DEFAULT; break;
+               case MAP_MEM_INNERWBACK:        *wimg = VM_WIMG_INNERWBACK; break;
+               case MAP_MEM_POSTED:            *wimg = VM_WIMG_POSTED; break;
+               case MAP_MEM_WTHRU:             *wimg = VM_WIMG_WTHRU; break;
+               case MAP_MEM_WCOMB:             *wimg = VM_WIMG_WCOMB; break;
+               default:
+                       panic("Unrecognized mapping type %u\n", prot);
+       }
+}
+
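
A usage sketch: translate a named-entry memory type into a WIMG caching mode. MAP_MEM_NOOP leaves the caller's default untouched:

        unsigned int wimg_mode = VM_WIMG_USE_DEFAULT;
        vm_prot_to_wimg(MAP_MEM_WCOMB, &wimg_mode);  /* wimg_mode becomes VM_WIMG_WCOMB */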
 #endif /* MACH_KERNEL_PRIVATE */
 
 #ifdef XNU_KERNEL_PRIVATE
@@ -1193,25 +1557,39 @@ extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
 /*
  * Flags for vm_map_remove() and vm_map_delete()
  */
-#define        VM_MAP_NO_FLAGS                 0x0
+#define        VM_MAP_REMOVE_NO_FLAGS          0x0
 #define        VM_MAP_REMOVE_KUNWIRE           0x1
 #define        VM_MAP_REMOVE_INTERRUPTIBLE     0x2
 #define        VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
 #define VM_MAP_REMOVE_SAVE_ENTRIES     0x8
 #define VM_MAP_REMOVE_NO_PMAP_CLEANUP  0x10
 #define VM_MAP_REMOVE_NO_MAP_ALIGN     0x20
+#define VM_MAP_REMOVE_NO_UNNESTING     0x40
+#define VM_MAP_REMOVE_IMMUTABLE                0x80
+#define VM_MAP_REMOVE_GAPS_OK          0x100
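
The remove flags combine as a bitmask. A sketch of a removal that also drops kernel wirings and skips map alignment, assuming map, start and end are in scope:

        /* Hypothetical: remove a kernel-wired range without map-aligning it. */
        kr = vm_map_remove(map, start, end,
                           VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_NO_MAP_ALIGN);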
 
 /* Support for UPLs from vm_maps */
 
+#ifdef XNU_KERNEL_PRIVATE
+
 extern kern_return_t vm_map_get_upl(
                                vm_map_t                target_map,
                                vm_map_offset_t         map_offset,
                                upl_size_t              *size,
                                upl_t                   *upl,
                                upl_page_info_array_t   page_info,
-                               unsigned int    *page_infoCnt,
-                               int             *flags,
-                               int             force_data_sync);
+                               unsigned int            *page_infoCnt,
+                               upl_control_flags_t     *flags,
+                               vm_tag_t                tag,
+                               int                     force_data_sync);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
+extern void
+vm_map_sizes(vm_map_t map,
+               vm_map_size_t * psize,
+               vm_map_size_t * pfree,
+               vm_map_size_t * plargest_free);
 
 #if CONFIG_DYNAMIC_CODE_SIGNING
 extern kern_return_t vm_map_sign(vm_map_t map, 
@@ -1224,35 +1602,47 @@ extern kern_return_t vm_map_partial_reap(
                unsigned int *reclaimed_resident,
                unsigned int *reclaimed_compressed);
 
-#if CONFIG_FREEZE
-void   vm_map_freeze_thaw_init(void);
-void   vm_map_freeze_thaw(void);
-void   vm_map_demand_fault(void);
 
-extern kern_return_t vm_map_freeze_walk(
-               vm_map_t map,
-               unsigned int *purgeable_count,
-               unsigned int *wired_count,
-               unsigned int *clean_count,
-               unsigned int *dirty_count,
-               unsigned int dirty_budget,
-               boolean_t *has_shared);
+#if DEVELOPMENT || DEBUG
+
+extern int vm_map_disconnect_page_mappings(
+               vm_map_t map,
+               boolean_t);
+#endif
+
+
+#if CONFIG_FREEZE
 
 extern kern_return_t vm_map_freeze(
-               vm_map_t map,
+               vm_map_t     map,
                unsigned int *purgeable_count,
                unsigned int *wired_count,
                unsigned int *clean_count,
                unsigned int *dirty_count,
                unsigned int dirty_budget,
-               boolean_t *has_shared);
-                
-extern kern_return_t vm_map_thaw(
-                vm_map_t map);
+               unsigned int *shared_count,
+               int          *freezer_error_code,
+               boolean_t    eval_only);
+
+
+#define FREEZER_ERROR_GENERIC                  (-1)
+#define FREEZER_ERROR_EXCESS_SHARED_MEMORY     (-2)
+#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO (-3)
+#define FREEZER_ERROR_NO_COMPRESSOR_SPACE      (-4)
+#define FREEZER_ERROR_NO_SWAP_SPACE            (-5)
+
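
eval_only enables a dry run, so the same call can first estimate and then perform the freeze. A sketch, assuming the counters and budget are in scope:

        /* Hypothetical two-pass freeze: evaluate first, then do the work. */
        kr = vm_map_freeze(map, &purgeable, &wired, &clean, &dirty,
                           dirty_budget, &shared, &freezer_error_code,
                           TRUE);    /* eval_only */
        if (kr == KERN_SUCCESS)
                kr = vm_map_freeze(map, &purgeable, &wired, &clean, &dirty,
                                   dirty_budget, &shared, &freezer_error_code,
                                   FALSE);   /* really freeze */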
 #endif
 
 __END_DECLS
 
+/*
+ * In some cases, we don't have a real VM object but still want to return a
+ * unique ID (to avoid a memory region looking like shared memory), so build
+ * a fake pointer based on the map's ledger and the index of the ledger being
+ * reported.
+ */
+#define INFO_MAKE_FAKE_OBJECT_ID(map,ledger_id)        ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))
+
 #endif /* KERNEL_PRIVATE */
  
 #endif /* _VM_VM_MAP_H_ */