[apple/xnu.git] osfmk/vm/vm_object.h (blobdiff, xnu-2782.10.72)
index 8ad7db64e0af3460a449f93d776afe6bb3ee0fcc..9462329d78db0da0597730cb492d7dfd6cf52c25 100644
--- a/osfmk/vm/vm_object.h
+++ b/osfmk/vm/vm_object.h
@@ -66,6 +66,8 @@
 #ifndef        _VM_VM_OBJECT_H_
 #define _VM_VM_OBJECT_H_
 
+#include <debug.h>
+#include <mach_assert.h>
 #include <mach_pagemap.h>
 #include <task_swapper.h>
 
@@ -77,7 +79,6 @@
 #include <mach/vm_param.h>
 #include <mach/machine/vm_types.h>
 #include <kern/queue.h>
-#include <kern/lock.h>
 #include <kern/locks.h>
 #include <kern/assert.h>
 #include <kern/misc_protos.h>
 #include <ipc/ipc_types.h>
 #include <vm/pmap.h>
 
-#if    MACH_PAGEMAP
 #include <vm/vm_external.h>
-#endif /* MACH_PAGEMAP */
 
 #include <vm/vm_options.h>
 
+#if VM_OBJECT_TRACKING
+#include <libkern/OSDebug.h>
+#include <kern/btlog.h>
+extern void vm_object_tracking_init(void);
+extern boolean_t vm_object_tracking_inited;
+extern btlog_t *vm_object_tracking_btlog;
+#define VM_OBJECT_TRACKING_BTDEPTH 7
+#define VM_OBJECT_TRACKING_OP_CREATED  1
+#define VM_OBJECT_TRACKING_OP_MODIFIED 2
+#define VM_OBJECT_TRACKING_OP_TRUESHARE        3
+#endif /* VM_OBJECT_TRACKING */
+
 struct vm_page;
+struct vm_shared_region_slide_info;
 
 /*
  *     Types defined:
@@ -107,19 +119,36 @@ struct vm_object_fault_info {
         vm_behavior_t  behavior;
         vm_map_offset_t        lo_offset;
        vm_map_offset_t hi_offset;
-       boolean_t       no_cache;
-       boolean_t       stealth;
+       unsigned int
+       /* boolean_t */ no_cache:1,
+       /* boolean_t */ stealth:1,
+       /* boolean_t */ io_sync:1,
+       /* boolean_t */ cs_bypass:1,
+       /* boolean_t */ mark_zf_absent:1,
+       /* boolean_t */ batch_pmap_op:1,
+               __vm_object_fault_info_unused_bits:26;
+       int             pmap_options;
 };
 
 
+#define        vo_size                         vo_un1.vou_size
+#define vo_cache_pages_to_scan         vo_un1.vou_cache_pages_to_scan
+#define vo_shadow_offset               vo_un2.vou_shadow_offset
+#define vo_cache_ts                    vo_un2.vou_cache_ts
+#define vo_purgeable_owner             vo_un2.vou_purgeable_owner
+#define vo_slide_info                  vo_un2.vou_slide_info
 
 struct vm_object {
        queue_head_t            memq;           /* Resident memory */
         lck_rw_t               Lock;           /* Synchronization */
 
-       vm_object_size_t        size;           /* Object size (only valid
-                                                * if internal)
-                                                */
+       union {
+               vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
+               int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
+                                                                * external object in cache
+                                                                */
+       } vo_un1;
+
        struct vm_page          *memq_hint;
        int                     ref_count;      /* Number of references */
 #if    TASK_SWAPPER
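
Note on the change above: fields that are never live at the same time (object size vs. cache scan progress in vo_un1; shadow offset vs. cache timestamp vs. purgeable owner vs. slide info in vo_un2) are folded into unions, and the vo_* macros keep call sites unaware of the union layout. A minimal userspace sketch of the same pattern; the struct and field names here are invented for illustration and are not part of xnu:

#include <stdio.h>
#include <stdint.h>

struct obj {
        int internal;                       /* discriminator: which union member is live */
        union {
                uint64_t vou_size;          /* meaningful only while internal != 0 */
                int      vou_pages_to_scan; /* meaningful only for a cached external object */
        } vo_un1;
};

/* Accessor macros hide the union member names from the rest of the code. */
#define vo_size          vo_un1.vou_size
#define vo_pages_to_scan vo_un1.vou_pages_to_scan

int main(void)
{
        struct obj o = { .internal = 1 };
        o.vo_size = 4096;                   /* same storage as vou_pages_to_scan */
        printf("size=%llu, struct obj is %zu bytes\n",
               (unsigned long long)o.vo_size, sizeof(struct obj));
        return 0;
}

Only the member selected by the object's current role is meaningful; reading the other one is a bug, which is why access goes through the named macros rather than the raw union.
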
@@ -138,7 +167,18 @@ struct vm_object {
                                                 * copy_call.
                                                 */
        struct vm_object        *shadow;        /* My shadow */
-       vm_object_offset_t      shadow_offset;  /* Offset into shadow */
+
+       union {
+               vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
+               clock_sec_t     vou_cache_ts;   /* age of an external object
+                                                * present in cache
+                                                */
+               task_t          vou_purgeable_owner;    /* If the purg'a'ble bits below are set 
+                                                        * to volatile/empty, this is the task
+                                                        * that owns this purgeable object.
+                                                        */
+               struct vm_shared_region_slide_info *vou_slide_info;
+       } vo_un2;
 
        memory_object_t         pager;          /* Where to get data */
        vm_object_offset_t      paging_offset;  /* Offset into memory object */
@@ -147,7 +187,27 @@ struct vm_object {
        memory_object_copy_strategy_t
                                copy_strategy;  /* How to handle data copy */
 
-       short                   paging_in_progress;
+#if __LP64__
+       /*
+        * Some user processes (mostly VirtualMachine software) take a large
+        * number of UPLs (via IOMemoryDescriptors) to wire pages in large
+        * VM objects and overflow the 16-bit "activity_in_progress" counter.
+        * Since we never enforced any limit there, let's give them 32 bits
+        * for backwards compatibility's sake.
+        */
+       unsigned int            paging_in_progress:16,
+                               __object1_unused_bits:16;
+       unsigned int            activity_in_progress;
+#else /* __LP64__ */
+       /*
+        * On 32-bit platforms, enlarging "activity_in_progress" would increase
+        * the size of "struct vm_object".  Since we don't know of any actual
+        * overflow of these counters on these platforms, let's keep the
+        * counters as 16-bit integers.
+        */
+       unsigned short          paging_in_progress;
+       unsigned short          activity_in_progress;
+#endif /* __LP64__ */
                                                /* The memory object ports are
                                                 * being used (e.g., for pagein
                                                 * or pageout) -- don't change
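
The comment block above is the heart of this hunk: a 16-bit activity_in_progress can silently wrap to zero once 65536 UPL activities are outstanding, so LP64 kernels give it a full 32 bits (paging_in_progress stays at 16 bits, padded to keep the layout word aligned), while 32-bit kernels keep both counters short to avoid growing struct vm_object. The wraparound itself is plain C behavior, shown here in a standalone snippet with nothing xnu-specific in it:

#include <stdio.h>

int main(void)
{
        unsigned short activity = 65535;   /* 16-bit counter at its maximum */
        activity++;                        /* wraps to 0 and looks like "no activity" */
        printf("16-bit counter after one more begin: %u\n", (unsigned)activity);

        unsigned int wide = 65535;         /* the LP64 replacement keeps counting */
        wide++;
        printf("32-bit counter: %u\n", wide);
        return 0;
}
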
@@ -155,7 +215,6 @@ struct vm_object {
                                                 * don't collapse, destroy or
                                                 * terminate)
                                                 */
-       short                   activity_in_progress;
 
        unsigned int
        /* boolean_t array */   all_wanted:11,  /* Bit array of "want to be
@@ -200,11 +259,10 @@ struct vm_object {
        /* boolean_t */         purgable:2,     /* Purgable state.  See
                                                 * VM_PURGABLE_* 
                                                 */
+       /* boolean_t */         purgeable_when_ripe:1, /* Purgeable when a token
+                                                       * becomes ripe.
+                                                       */
        /* boolean_t */         shadowed:1,     /* Shadow may exist */
-       /* boolean_t */         silent_overwrite:1,
-                                               /* Allow full page overwrite
-                                                * without data_request if
-                                                * page is absent */
        /* boolean_t */         advisory_pageout:1,
                                                /* Instead of sending page
                                                 * via OOL, just notify
@@ -298,15 +356,25 @@ struct vm_object {
                hashed:1,               /* object/pager entered in hash */
                transposed:1,           /* object was transposed with another */
                mapping_in_progress:1,  /* pager being mapped/unmapped */
+               phantom_isssd:1,
                volatile_empty:1,
                volatile_fault:1,
                all_reusable:1,
                blocked_access:1,
-               __object2_unused_bits:16;       /* for expansion */
-
-#if    UPL_DEBUG
+               set_cache_attr:1,
+               object_slid:1,
+               purgeable_queue_type:2,
+               purgeable_queue_group:3,
+               io_tracking:1,
+               __object2_unused_bits:7;        /* for expansion */
+
+       uint32_t                scan_collisions;
+#if CONFIG_PHANTOM_CACHE
+       uint32_t                phantom_object_id;
+#endif
+#if CONFIG_IOSCHED || UPL_DEBUG
        queue_head_t            uplq;           /* List of outstanding upls */
-#endif /* UPL_DEBUG */
+#endif
 
 #ifdef VM_PIP_DEBUG
 /*
@@ -320,7 +388,13 @@ struct vm_object {
        } pip_holders[VM_PIP_DEBUG_MAX_REFS];
 #endif /* VM_PIP_DEBUG  */
 
-        queue_chain_t       objq;      /* object queue - currently used for purgable queues */
+        queue_chain_t          objq;      /* object queue - currently used for purgable queues */
+
+#if DEBUG
+       void *purgeable_owner_bt[16];
+       task_t vo_purgeable_volatilizer; /* who made it volatile? */
+       void *purgeable_volatilizer_bt[16];
+#endif /* DEBUG */
 };
 
 #define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                                \
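
The DEBUG-only fields added above pair each purgeable ownership change with a saved call stack (vo_purgeable_volatilizer records who made the object volatile). In the kernel such a capture would typically use OSBacktrace() from libkern/OSDebug.h; below is a userspace analogy built on backtrace(3) from execinfo.h, with the struct and helper names invented for the example:

#include <execinfo.h>

#define BT_DEPTH 16

struct tracked_obj {
        int   is_volatile;
        void *volatilizer_bt[BT_DEPTH];   /* call stack of the last volatile transition */
        int   volatilizer_bt_len;
};

/* Record who flipped the object to the volatile state, like the DEBUG fields above. */
static void make_volatile(struct tracked_obj *o)
{
        o->is_volatile = 1;
        o->volatilizer_bt_len = backtrace(o->volatilizer_bt, BT_DEPTH);
}

int main(void)
{
        struct tracked_obj o = { 0 };
        make_volatile(&o);
        /* Dump the captured frames to stdout, roughly what a debugger would show. */
        backtrace_symbols_fd(o.volatilizer_bt, o.volatilizer_bt_len, 1);
        return 0;
}
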
@@ -356,10 +430,13 @@ struct vm_object {
        __object->memq_hint = __page;                           \
        MACRO_END
 
-__private_extern__
+extern
 vm_object_t    kernel_object;          /* the single kernel object */
 
-__private_extern__
+extern
+vm_object_t    compressor_object;      /* the single compressor object */
+
+extern
 unsigned int   vm_object_absent_max;   /* maximum number of absent pages
                                           at a time for each object */
 
@@ -395,7 +472,10 @@ extern lck_attr_t          vm_map_lck_attr;
     MACRO_END
 
 #define msync_req_free(msr)                                            \
-       (kfree((msr), sizeof(struct msync_req)))
+    MACRO_BEGIN                                                                \
+        lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp);      \
+       kfree((msr), sizeof(struct msync_req));                         \
+    MACRO_END
 
 #define msr_lock(msr)   lck_mtx_lock(&(msr)->msync_req_lock)
 #define msr_unlock(msr) lck_mtx_unlock(&(msr)->msync_req_lock)
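
msync_req_free() is now a proper teardown: it destroys the embedded msync_req_lock with lck_mtx_destroy() before handing the memory back with kfree(), instead of freeing a structure that still contains an initialized lock. The same init/use/destroy/free discipline in a portable, runnable form using pthreads; the struct here is a stand-in, not the kernel's msync_req:

#include <pthread.h>
#include <stdlib.h>

struct msr {
        pthread_mutex_t lock;   /* embedded lock, like msync_req_lock */
        int             flag;
};

int main(void)
{
        struct msr *msr = malloc(sizeof(*msr));
        if (msr == NULL)
                return 1;
        pthread_mutex_init(&msr->lock, NULL);   /* counterpart of msync_req_alloc() */

        pthread_mutex_lock(&msr->lock);         /* msr_lock() */
        msr->flag = 1;
        pthread_mutex_unlock(&msr->lock);       /* msr_unlock() */

        /* Tear down the lock before freeing the memory that contains it. */
        pthread_mutex_destroy(&msr->lock);      /* counterpart of lck_mtx_destroy() */
        free(msr);                              /* counterpart of kfree() */
        return 0;
}
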
@@ -404,7 +484,7 @@ extern lck_attr_t           vm_map_lck_attr;
  *     Declare procedures that operate on VM objects.
  */
 
-__private_extern__ void                vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));
+__private_extern__ void                vm_object_bootstrap(void);
 
 __private_extern__ void                vm_object_init(void);
 
@@ -453,7 +533,7 @@ __private_extern__ void     vm_object_res_deallocate(
        vm_object_lock_assert_shared(object);                           \
        assert((RLObject)->ref_count > 0);                              \
        OSAddAtomic(1, &(RLObject)->ref_count);         \
-       assert((RLObject)->ref_count > 1);                              \
+       assert((RLObject)->ref_count > 0);                              \
        /* XXX we would need an atomic version of the following ... */  \
        vm_object_res_reference(RLObject);                              \
        MACRO_END
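
The reference macro above bumps ref_count with OSAddAtomic() while holding only the shared (read) object lock, which works because the increment itself is atomic; the assertion afterwards now only requires the count to remain positive. A small stdatomic.h sketch of the same lock-free increment idea, with names that are illustrative rather than xnu's:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct refobj {
        atomic_int ref_count;
};

/* Take an extra reference.  Many threads may do this concurrently while
 * holding only a shared lock, because the increment itself is atomic. */
static void obj_reference(struct refobj *o)
{
        int old = atomic_fetch_add(&o->ref_count, 1);
        if (old <= 0)           /* taking a reference on a dead object is a bug */
                abort();
}

int main(void)
{
        struct refobj o;
        atomic_init(&o.ref_count, 1);
        obj_reference(&o);
        printf("ref_count = %d\n", atomic_load(&o.ref_count));
        return 0;
}
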
@@ -491,6 +571,15 @@ __private_extern__ void            vm_object_pmap_protect(
                                        vm_map_offset_t         pmap_start,
                                        vm_prot_t               prot);
 
+__private_extern__ void                vm_object_pmap_protect_options(
+                                       vm_object_t             object,
+                                       vm_object_offset_t      offset,
+                                       vm_object_size_t        size,
+                                       pmap_t                  pmap,
+                                       vm_map_offset_t         pmap_start,
+                                       vm_prot_t               prot,
+                                       int                     options);
+
 __private_extern__ void                vm_object_page_remove(
                                        vm_object_t             object,
                                        vm_object_offset_t      start,
@@ -510,13 +599,21 @@ __private_extern__ void   vm_object_reuse_pages(
        boolean_t               allow_partial_reuse);
 
 __private_extern__ void                vm_object_purge(
-                                       vm_object_t             object);
+                                      vm_object_t              object,
+                                      int                      flags);
 
 __private_extern__ kern_return_t vm_object_purgable_control(
        vm_object_t     object,
        vm_purgable_t   control,
        int             *state);
 
+__private_extern__ kern_return_t vm_object_get_page_counts(
+       vm_object_t             object,
+       vm_object_offset_t      offset,
+       vm_object_size_t        size,
+       unsigned int            *resident_page_count,
+       unsigned int            *dirty_page_count);
+
 __private_extern__ boolean_t   vm_object_coalesce(
                                        vm_object_t             prev_object,
                                        vm_object_t             next_object,
@@ -572,6 +669,9 @@ __private_extern__ kern_return_t    vm_object_destroy(
 __private_extern__ void                vm_object_pager_create(
                                        vm_object_t     object);
 
+__private_extern__ void                vm_object_compressor_pager_create(
+                                       vm_object_t     object);
+
 __private_extern__ void                vm_object_page_map(
                                vm_object_t     object,
                                vm_object_offset_t      offset,
@@ -643,6 +743,10 @@ __private_extern__ kern_return_t vm_object_populate_with_private(
        ppnum_t                 phys_page,
        vm_size_t               size);
 
+__private_extern__ void vm_object_change_wimg_mode(
+       vm_object_t             object,
+       unsigned int            wimg_mode);
+
 extern kern_return_t adjust_vm_object_cache(
        vm_size_t oval,
        vm_size_t nval);
@@ -670,6 +774,50 @@ __private_extern__ void            vm_object_reap_pages(
 #define REAP_PURGEABLE 2
 #define REAP_DATA_FLUSH        3
 
+#if CONFIG_FREEZE
+struct default_freezer_handle;
+
+__private_extern__ kern_return_t 
+vm_object_pack(
+       unsigned int            *purgeable_count,
+       unsigned int            *wired_count,
+       unsigned int            *clean_count,
+       unsigned int            *dirty_count,
+       unsigned int            dirty_budget,
+       boolean_t               *shared,
+       vm_object_t             src_object,
+       struct default_freezer_handle *df_handle);
+
+__private_extern__ void
+vm_object_pack_pages(
+       unsigned int            *wired_count,
+       unsigned int            *clean_count,
+       unsigned int            *dirty_count,
+       unsigned int            dirty_budget,
+       vm_object_t             src_object,
+       struct default_freezer_handle *df_handle);
+
+__private_extern__ void
+vm_object_pageout(
+       vm_object_t     object);
+
+__private_extern__  kern_return_t
+vm_object_pagein(
+       vm_object_t     object);
+#endif /* CONFIG_FREEZE */
+
+#if CONFIG_IOSCHED
+struct io_reprioritize_req {
+       uint64_t        blkno;
+       uint32_t        len;
+       int             priority;
+       struct vnode    *devvp;
+       queue_chain_t   io_reprioritize_list;
+};
+typedef struct io_reprioritize_req *io_reprioritize_req_t;
+
+extern void vm_io_reprioritize_init(void);
+#endif
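
io_reprioritize_req above carries its own queue_chain_t so requests can sit on an intrusive list without separate node allocations, with vm_io_reprioritize_init() setting up whatever drains that list. The embedded-linkage idiom in portable form, using the BSD sys/queue.h TAILQ macros as a stand-in for the kernel's queue_head_t/queue_chain_t; the field and variable names below are invented:

#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct reprio_req {
        uint64_t blkno;
        int      priority;
        TAILQ_ENTRY(reprio_req) link;   /* embedded linkage, like queue_chain_t */
};

TAILQ_HEAD(reprio_list, reprio_req);    /* list head, like queue_head_t */

int main(void)
{
        struct reprio_list pending;
        TAILQ_INIT(&pending);

        struct reprio_req *r = malloc(sizeof(*r));
        if (r == NULL)
                return 1;
        r->blkno = 1234;
        r->priority = 3;
        TAILQ_INSERT_TAIL(&pending, r, link);   /* enqueue for a worker to pick up */

        struct reprio_req *it;
        TAILQ_FOREACH(it, &pending, link)
                printf("blk %llu prio %d\n", (unsigned long long)it->blkno, it->priority);

        TAILQ_REMOVE(&pending, r, link);
        free(r);
        return 0;
}
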
 
 /*
  *     Event waiting handling
@@ -696,7 +844,7 @@ __private_extern__ void             vm_object_reap_pages(
        thread_block(THREAD_CONTINUE_NULL))                             \
 
 #define thread_sleep_vm_object(object, event, interruptible)           \
-        lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))
+        lck_rw_sleep(&(object)->Lock, LCK_SLEEP_PROMOTED_PRI, (event_t)(event), (interruptible))
 
 #define vm_object_sleep(object, event, interruptible)                  \
        (((object)->all_wanted |= 1 << (event)),                        \
@@ -739,15 +887,19 @@ __private_extern__ void           vm_object_reap_pages(
 #define                vm_object_activity_begin(object)                        \
        MACRO_BEGIN                                                     \
        vm_object_lock_assert_exclusive((object));                      \
-       assert((object)->paging_in_progress >= 0);                      \
        VM_PIP_DEBUG_BEGIN((object));                                   \
        (object)->activity_in_progress++;                               \
+       if ((object)->activity_in_progress == 0) {                      \
+               panic("vm_object_activity_begin(%p): overflow\n", (object));\
+       }                                                               \
        MACRO_END
 
 #define                vm_object_activity_end(object)                          \
        MACRO_BEGIN                                                     \
        vm_object_lock_assert_exclusive((object));                      \
-       assert((object)->activity_in_progress > 0);                     \
+       if ((object)->activity_in_progress == 0) {                      \
+               panic("vm_object_activity_end(%p): underflow\n", (object));\
+       }                                                               \
        (object)->activity_in_progress--;                               \
        if ((object)->paging_in_progress == 0 &&                        \
            (object)->activity_in_progress == 0)                        \
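
In the two macros above the sanity checks move from assert(), which only fires on kernels built with assertions, to explicit tests that panic on any kernel: after an increment the counter can read zero only if it just wrapped, and a zero counter before a decrement means an unbalanced end call. A standalone sketch of the same pattern, with abort() standing in for panic() and all names invented for the example:

#include <stdio.h>
#include <stdlib.h>

static unsigned short activity_in_progress;  /* deliberately narrow so the wrap is easy to hit */

static void activity_begin(void)
{
        activity_in_progress++;
        if (activity_in_progress == 0)       /* can only happen if we just wrapped */
                abort();                     /* panic("...: overflow") in the kernel */
}

static void activity_end(void)
{
        if (activity_in_progress == 0)       /* more end calls than begin calls */
                abort();                     /* panic("...: underflow") in the kernel */
        activity_in_progress--;
}

int main(void)
{
        activity_begin();
        activity_end();
        printf("balanced, counter = %u\n", (unsigned)activity_in_progress);
        return 0;
}
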
@@ -758,15 +910,19 @@ __private_extern__ void           vm_object_reap_pages(
 #define                vm_object_paging_begin(object)                          \
        MACRO_BEGIN                                                     \
        vm_object_lock_assert_exclusive((object));                      \
-       assert((object)->paging_in_progress >= 0);                      \
        VM_PIP_DEBUG_BEGIN((object));                                   \
        (object)->paging_in_progress++;                                 \
+       if ((object)->paging_in_progress == 0) {                        \
+               panic("vm_object_paging_begin(%p): overflow\n", (object));\
+       }                                                               \
        MACRO_END
 
 #define                vm_object_paging_end(object)                            \
        MACRO_BEGIN                                                     \
        vm_object_lock_assert_exclusive((object));                      \
-       assert((object)->paging_in_progress > 0);                       \
+       if ((object)->paging_in_progress == 0) {                        \
+               panic("vm_object_paging_end(%p): underflow\n", (object));\
+       }                                                               \
        (object)->paging_in_progress--;                                 \
        if ((object)->paging_in_progress == 0) {                        \
                vm_object_wakeup((object),                              \
@@ -849,6 +1005,7 @@ extern lck_grp_t   vm_object_lck_grp;
 extern lck_grp_attr_t  vm_object_lck_grp_attr;
 extern lck_attr_t      vm_object_lck_attr;
 extern lck_attr_t      kernel_object_lck_attr;
+extern lck_attr_t      compressor_object_lck_attr;
 
 extern vm_object_t     vm_pageout_scan_wants_object;
 
@@ -868,7 +1025,9 @@ extern boolean_t   vm_object_lock_try_shared(vm_object_t);
                    (((object) == kernel_object ||                      \
                      (object) == vm_submap_object) ?                   \
                     &kernel_object_lck_attr :                          \
-                    &vm_object_lck_attr))
+                    (((object) == compressor_object) ?                 \
+                    &compressor_object_lck_attr :                      \
+                     &vm_object_lck_attr)))
 #define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
 
 #define vm_object_unlock(object)       lck_rw_done(&(object)->Lock)
@@ -880,20 +1039,24 @@ extern boolean_t vm_object_lock_try_shared(vm_object_t);
  * check if anyone is holding the lock, but the holder may not necessarily
  * be the caller...
  */
-#if DEBUG
+#if MACH_ASSERT || DEBUG
 #define vm_object_lock_assert_held(object) \
        lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
 #define vm_object_lock_assert_shared(object)   \
        lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
 #define vm_object_lock_assert_exclusive(object) \
        lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
-#else /* DEBUG */
+#else  /* MACH_ASSERT || DEBUG */ 
 #define vm_object_lock_assert_held(object)
 #define vm_object_lock_assert_shared(object)
 #define vm_object_lock_assert_exclusive(object)
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
 
 #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
 #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
 
+extern void    vm_object_cache_add(vm_object_t);
+extern void    vm_object_cache_remove(vm_object_t);
+extern int     vm_object_cache_evict(int, int);
+
 #endif /* _VM_VM_OBJECT_H_ */
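
As a footnote to the vm_object_round_page()/vm_object_trunc_page() macros near the end of the header: they are ordinary mask arithmetic on the page size. A quick standalone check of that arithmetic, assuming 4 KiB pages (PAGE_MASK = 0xFFF); the macro names below are local to the example:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (PAGE_SIZE - 1)          /* 0xFFF for 4 KiB pages */

#define round_page(x) (((uint64_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define trunc_page(x) ((uint64_t)(x) & ~PAGE_MASK)

int main(void)
{
        uint64_t off = 0x12345;
        printf("trunc(0x%llx) = 0x%llx\n",      /* 0x12000 */
               (unsigned long long)off, (unsigned long long)trunc_page(off));
        printf("round(0x%llx) = 0x%llx\n",      /* 0x13000 */
               (unsigned long long)off, (unsigned long long)round_page(off));
        return 0;
}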