apple/xnu (xnu-3789.70.16): osfmk/vm/vm_object.h

diff --git a/osfmk/vm/vm_object.h b/osfmk/vm/vm_object.h
index 9c4fe0e32ba09004566a3d3e2605db40bbe7e12f..daef8471195f24126a2596eb8fc3fa1169852590 100644
--- a/osfmk/vm/vm_object.h
+++ b/osfmk/vm/vm_object.h
@@ -79,7 +79,6 @@
 #include <mach/vm_param.h>
 #include <mach/machine/vm_types.h>
 #include <kern/queue.h>
-#include <kern/lock.h>
 #include <kern/locks.h>
 #include <kern/assert.h>
 #include <kern/misc_protos.h>
 #include <vm/vm_external.h>
 
 #include <vm/vm_options.h>
+#include <vm/vm_page.h>
+
+#if VM_OBJECT_TRACKING
+#include <libkern/OSDebug.h>
+#include <kern/btlog.h>
+extern void vm_object_tracking_init(void);
+extern boolean_t vm_object_tracking_inited;
+extern btlog_t *vm_object_tracking_btlog;
+#define VM_OBJECT_TRACKING_NUM_RECORDS 50000
+#define VM_OBJECT_TRACKING_BTDEPTH 7
+#define VM_OBJECT_TRACKING_OP_CREATED  1
+#define VM_OBJECT_TRACKING_OP_MODIFIED 2
+#define VM_OBJECT_TRACKING_OP_TRUESHARE        3
+#endif /* VM_OBJECT_TRACKING */
 
 struct vm_page;
 struct vm_shared_region_slide_info;
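When VM_OBJECT_TRACKING is configured, object lifecycle events are recorded into the btlog declared above. A minimal sketch of the recording side for a freshly created object, modeled on the hooks in vm_object.c (the surrounding variable names are illustrative):

    #if VM_OBJECT_TRACKING
    	if (vm_object_tracking_inited) {
    		void	*bt[VM_OBJECT_TRACKING_BTDEPTH];
    		int	numsaved;

    		/* capture up to VM_OBJECT_TRACKING_BTDEPTH return addresses */
    		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
    		btlog_add_entry(vm_object_tracking_btlog,
    				object,
    				VM_OBJECT_TRACKING_OP_CREATED,
    				bt,
    				numsaved);
    	}
    #endif /* VM_OBJECT_TRACKING */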
@@ -116,6 +129,7 @@ struct vm_object_fault_info {
        /* boolean_t */ mark_zf_absent:1,
        /* boolean_t */ batch_pmap_op:1,
                __vm_object_fault_info_unused_bits:26;
+       int             pmap_options;
 };
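The new pmap_options field lets a fault carry pmap-layer options down to the physical map. A hedged sketch of the intended flow (the local names, the no-wait condition, and the page/pmap arguments are assumptions loosely modeled on the vm_fault.c call sites):

    int options = fault_info->pmap_options;

    if (cannot_block)			/* hypothetical caller state */
    	options |= PMAP_OPTIONS_NOWAIT;

    kr = pmap_enter_options(pmap, vaddr, VM_PAGE_GET_PHYS_PAGE(m),
    			prot, fault_type, 0, wired, options, NULL);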
 
 
@@ -127,9 +141,25 @@ struct vm_object_fault_info {
 #define vo_slide_info                  vo_un2.vou_slide_info
 
 struct vm_object {
-       queue_head_t            memq;           /* Resident memory */
+	/*
+	 * On 64-bit systems we pack the pointers hung off the memq.
+	 * Those pointers have to be able to point back to the memq.
+	 * The packed pointers are required to be on a 64-byte boundary,
+	 * which means two things for the vm_object...  (1) the memq
+	 * struct has to be the first element of the structure so that
+	 * we can control its alignment... (2) the vm_object must be
+	 * aligned on a 64-byte boundary... for static vm_objects
+	 * this is accomplished via the 'aligned' attribute... for
+	 * vm_objects in the zone pool, this is accomplished by
+	 * rounding the size of the vm_object element up to the nearest
+	 * multiple of 64 bytes before creating the zone.
+	 */
+       vm_page_queue_head_t    memq;           /* Resident memory - must be first */
         lck_rw_t               Lock;           /* Synchronization */
 
+#if DEVELOPMENT || DEBUG
+       thread_t                Lock_owner;
+#endif
        union {
                vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
                int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
@@ -139,9 +169,6 @@ struct vm_object {
 
        struct vm_page          *memq_hint;
        int                     ref_count;      /* Number of references */
-#if    TASK_SWAPPER
-       int                     res_count;      /* Residency references (swap)*/
-#endif /* TASK_SWAPPER */
        unsigned int            resident_page_count;
                                                /* number of resident pages */
        unsigned int            wired_page_count; /* number of wired pages */
@@ -175,7 +202,27 @@ struct vm_object {
        memory_object_copy_strategy_t
                                copy_strategy;  /* How to handle data copy */
 
-       short                   paging_in_progress;
+#if __LP64__
+       /*
+        * Some user processes (mostly VirtualMachine software) take a large
+        * number of UPLs (via IOMemoryDescriptors) to wire pages in large
+        * VM objects and overflow the 16-bit "activity_in_progress" counter.
+        * Since we never enforced any limit there, let's give them 32 bits
+        * for backwards compatibility's sake.
+        */
+       unsigned int            paging_in_progress:16,
+                               __object1_unused_bits:16;
+       unsigned int            activity_in_progress;
+#else /* __LP64__ */
+       /*
+        * On 32-bit platforms, enlarging "activity_in_progress" would increase
+        * the size of "struct vm_object".  Since we don't know of any actual
+        * overflow of these counters on these platforms, let's keep the
+        * counters as 16-bit integers.
+        */
+       unsigned short          paging_in_progress;
+       unsigned short          activity_in_progress;
+#endif /* __LP64__ */
                                                /* The memory object ports are
                                                 * being used (e.g., for pagein
                                                 * or pageout) -- don't change
@@ -183,7 +230,6 @@ struct vm_object {
                                                 * don't collapse, destroy or
                                                 * terminate)
                                                 */
-       short                   activity_in_progress;
 
        unsigned int
        /* boolean_t array */   all_wanted:11,  /* Bit array of "want to be
@@ -283,8 +329,6 @@ struct vm_object {
                                                 * primary caching. (for
                                                 * I/O)
                                                 */
-                                               
-
 
        queue_chain_t           cached_list;    /* Attachment point for the
                                                 * list of objects cached as a
@@ -304,10 +348,6 @@ struct vm_object {
 
         uint32_t               pages_created;
         uint32_t               pages_used;
-#if    MACH_PAGEMAP
-       vm_external_map_t       existence_map;  /* bitmap of pages written to
-                                                * backing storage */
-#endif /* MACH_PAGEMAP */
        vm_offset_t             cow_hint;       /* last page present in     */
                                                /* shadow but not in object */
 #if    MACH_ASSERT
@@ -325,6 +365,7 @@ struct vm_object {
                hashed:1,               /* object/pager entered in hash */
                transposed:1,           /* object was transposed with another */
                mapping_in_progress:1,  /* pager being mapped/unmapped */
+		phantom_isssd:1,	/* backing store resides on an SSD */
                volatile_empty:1,
                volatile_fault:1,
                all_reusable:1,
@@ -333,13 +374,25 @@ struct vm_object {
                object_slid:1,
                purgeable_queue_type:2,
                purgeable_queue_group:3,
-               __object2_unused_bits:9;        /* for expansion */
-
-       uint32_t                scan_collisions;
-
-#if    UPL_DEBUG
+               io_tracking:1,
+#if CONFIG_SECLUDED_MEMORY
+               eligible_for_secluded:1,
+               can_grab_secluded:1,
+#else /* CONFIG_SECLUDED_MEMORY */
+               __object3_unused_bits:2,
+#endif /* CONFIG_SECLUDED_MEMORY */
+               __object2_unused_bits:5;        /* for expansion */
+
+       uint8_t                 scan_collisions;
+        vm_tag_t               wire_tag;
+       uint8_t                 __object4_unused_bits[2];
+
+#if CONFIG_PHANTOM_CACHE
+       uint32_t                phantom_object_id;
+#endif
+#if CONFIG_IOSCHED || UPL_DEBUG
        queue_head_t            uplq;           /* List of outstanding upls */
-#endif /* UPL_DEBUG */
+#endif
 
 #ifdef VM_PIP_DEBUG
 /*
@@ -354,6 +407,12 @@ struct vm_object {
 #endif /* VM_PIP_DEBUG  */
 
         queue_chain_t          objq;      /* object queue - currently used for purgable queues */
+
+#if DEBUG
+       void *purgeable_owner_bt[16];
+       task_t vo_purgeable_volatilizer; /* who made it volatile? */
+       void *purgeable_volatilizer_bt[16];
+#endif /* DEBUG */
 };
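The alignment contract described in the memq comment shows up in two places; a sketch, assuming the 64-byte VM_PACKED_POINTER_ALIGNMENT constant from vm_page.h (the zone-size rounding shown is illustrative, not the literal vm_object.c code):

    /* static objects: force 64-byte alignment with an attribute */
    static struct vm_object kernel_object_store
    	__attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

    /* zone-backed objects: round the element size up to a 64-byte multiple */
    vm_size_t vm_object_size =
    	(sizeof(struct vm_object) + VM_PACKED_POINTER_ALIGNMENT - 1) &
    	~((vm_size_t)(VM_PACKED_POINTER_ALIGNMENT - 1));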
 
 #define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                                \
@@ -361,34 +420,6 @@ struct vm_object {
         ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
          (object)->purgable == VM_PURGABLE_EMPTY))
 
-#define VM_PAGE_REMOVE(page)                                           \
-       MACRO_BEGIN                                                     \
-       vm_page_t __page = (page);                                      \
-       vm_object_t __object = __page->object;                          \
-       if (__page == __object->memq_hint) {                            \
-               vm_page_t       __new_hint;                             \
-               queue_entry_t   __qe;                                   \
-               __qe = queue_next(&__page->listq);                      \
-               if (queue_end(&__object->memq, __qe)) {                 \
-                       __qe = queue_prev(&__page->listq);              \
-                       if (queue_end(&__object->memq, __qe)) {         \
-                               __qe = NULL;                            \
-                       }                                               \
-               }                                                       \
-               __new_hint = (vm_page_t) __qe;                          \
-               __object->memq_hint = __new_hint;                       \
-       }                                                               \
-       queue_remove(&__object->memq, __page, vm_page_t, listq);        \
-       MACRO_END
-
-#define VM_PAGE_INSERT(page, object)                           \
-       MACRO_BEGIN                                             \
-       vm_page_t __page = (page);                              \
-       vm_object_t __object = (object);                        \
-       queue_enter(&__object->memq, __page, vm_page_t, listq); \
-       __object->memq_hint = __page;                           \
-       MACRO_END
-
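With memq now a vm_page_queue_head_t, the removed VM_PAGE_INSERT/VM_PAGE_REMOVE macros give way to the packed vm_page_queue_* operations in vm_page.h; a minimal sketch of the insertion side under that assumption:

    vm_page_queue_enter(&object->memq, mem, vm_page_t, listq);
    object->memq_hint = mem;	/* same hint update VM_PAGE_INSERT did */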
 extern
 vm_object_t    kernel_object;          /* the single kernel object */
 
@@ -439,6 +470,88 @@ extern lck_attr_t          vm_map_lck_attr;
 #define msr_lock(msr)   lck_mtx_lock(&(msr)->msync_req_lock)
 #define msr_unlock(msr) lck_mtx_unlock(&(msr)->msync_req_lock)
 
+#define VM_OBJECT_WIRED(object)                                                \
+    MACRO_BEGIN                                                                \
+    if ((object)->purgable == VM_PURGABLE_DENY)                                \
+    {                                                                  \
+       lck_spin_lock(&vm_objects_wired_lock);                          \
+       assert(!(object)->objq.next);                                   \
+       assert(!(object)->objq.prev);                                   \
+       queue_enter(&vm_objects_wired, (object), vm_object_t, objq);    \
+       lck_spin_unlock(&vm_objects_wired_lock);                        \
+    }                                                                  \
+    MACRO_END
+
+#define VM_OBJECT_UNWIRED(object)                                       \
+    MACRO_BEGIN                                                                 \
+    (object)->wire_tag = VM_KERN_MEMORY_NONE;                           \
+    if (((object)->purgable == VM_PURGABLE_DENY) && (object)->objq.next) \
+    {                                                                   \
+       lck_spin_lock(&vm_objects_wired_lock);                           \
+       queue_remove(&vm_objects_wired, (object), vm_object_t, objq);    \
+       lck_spin_unlock(&vm_objects_wired_lock);                         \
+    }                                                                   \
+    MACRO_END
+
+
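These two macros are meant to bracket the first wire and the last unwire of a non-purgeable object; a sketch of the pairing, loosely modeled on vm_page_wire() (the surrounding bookkeeping is an assumption):

    if (object->wired_page_count == 0) {
    	object->wire_tag = tag;		/* attribute the wired memory */
    	VM_OBJECT_WIRED(object);	/* enqueue on vm_objects_wired */
    }
    object->wired_page_count++;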
+#define OBJECT_LOCK_SHARED     0
+#define OBJECT_LOCK_EXCLUSIVE  1
+
+extern lck_grp_t       vm_object_lck_grp;
+extern lck_grp_attr_t  vm_object_lck_grp_attr;
+extern lck_attr_t      vm_object_lck_attr;
+extern lck_attr_t      kernel_object_lck_attr;
+extern lck_attr_t      compressor_object_lck_attr;
+
+extern vm_object_t     vm_pageout_scan_wants_object;
+
+extern void            vm_object_lock(vm_object_t);
+extern boolean_t       vm_object_lock_try(vm_object_t);
+extern boolean_t       _vm_object_lock_try(vm_object_t);
+extern boolean_t       vm_object_lock_avoid(vm_object_t);
+extern void            vm_object_lock_shared(vm_object_t);
+extern boolean_t       vm_object_lock_try_shared(vm_object_t);
+extern void            vm_object_unlock(vm_object_t);
+extern boolean_t       vm_object_lock_upgrade(vm_object_t);
+
+/*
+ *     Object locking macros
+ */
+
+#define vm_object_lock_init(object)                                    \
+       lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
+                   (((object) == kernel_object ||                      \
+                     (object) == vm_submap_object) ?                   \
+                    &kernel_object_lck_attr :                          \
+                    (((object) == compressor_object) ?                 \
+                    &compressor_object_lck_attr :                      \
+                     &vm_object_lck_attr)))
+#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
+
+#define vm_object_lock_try_scan(object)        _vm_object_lock_try(object)
+
+/*
+ * CAUTION: the following vm_object_lock_assert_held*() macros merely
+ * check if anyone is holding the lock, but the holder may not necessarily
+ * be the caller...
+ */
+#if MACH_ASSERT || DEBUG
+#define vm_object_lock_assert_held(object) \
+       lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
+#define vm_object_lock_assert_shared(object) \
+       lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
+#define vm_object_lock_assert_exclusive(object) \
+       lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
+#define vm_object_lock_assert_notheld(object) \
+       lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
+#else  /* MACH_ASSERT || DEBUG */ 
+#define vm_object_lock_assert_held(object)
+#define vm_object_lock_assert_shared(object)
+#define vm_object_lock_assert_exclusive(object)
+#define vm_object_lock_assert_notheld(object)
+#endif /* MACH_ASSERT || DEBUG */
+
+
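A typical pattern with these primitives is to inspect under the shared lock and upgrade before mutating. vm_object_lock_upgrade() returns FALSE and drops the lock when it loses the upgrade race, so the caller must retake it exclusive and re-validate (sketch):

    vm_object_lock_shared(object);
    /* ... read-only inspection ... */
    if (!vm_object_lock_upgrade(object)) {
    	/* the shared hold was dropped during the failed upgrade */
    	vm_object_lock(object);
    	/* re-check state: it may have changed while unlocked */
    }
    vm_object_lock_assert_exclusive(object);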
 /*
  *     Declare procedures that operate on VM objects.
  */
@@ -451,8 +564,7 @@ __private_extern__ void             vm_object_init_lck_grp(void);
 
 __private_extern__ void                vm_object_reaper_init(void);
 
-__private_extern__ vm_object_t vm_object_allocate(
-                                       vm_object_size_t        size);
+__private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size);
 
 __private_extern__ void    _vm_object_allocate(vm_object_size_t size,
                            vm_object_t object);
@@ -549,7 +661,9 @@ __private_extern__ void             vm_object_deactivate_pages(
                                        vm_object_offset_t      offset,
                                        vm_object_size_t        size,
                                        boolean_t               kill_page,
-                                       boolean_t               reusable_page);
+                                       boolean_t               reusable_page,
+                                       struct pmap             *pmap,
+                                       vm_map_offset_t         pmap_offset);
 
 __private_extern__ void        vm_object_reuse_pages(
        vm_object_t             object,
@@ -558,7 +672,8 @@ __private_extern__ void     vm_object_reuse_pages(
        boolean_t               allow_partial_reuse);
 
 __private_extern__ void                vm_object_purge(
-                                       vm_object_t             object);
+                                      vm_object_t              object,
+                                      int                      flags);
 
 __private_extern__ kern_return_t vm_object_purgable_control(
        vm_object_t     object,
@@ -645,7 +760,7 @@ __private_extern__ kern_return_t vm_object_upl_request(
                                upl_t                   *upl,
                                upl_page_info_t         *page_info,
                                unsigned int            *count,
-                               int                     flags);
+                               upl_control_flags_t     flags);
 
 __private_extern__ kern_return_t vm_object_transpose(
                                vm_object_t             object1,
@@ -733,36 +848,33 @@ __private_extern__ void           vm_object_reap_pages(
 #define REAP_DATA_FLUSH        3
 
 #if CONFIG_FREEZE
-struct default_freezer_handle;
-
-__private_extern__ kern_return_t 
-vm_object_pack(
-       unsigned int            *purgeable_count,
-       unsigned int            *wired_count,
-       unsigned int            *clean_count,
-       unsigned int            *dirty_count,
-       unsigned int            dirty_budget,
-       boolean_t               *shared,
-       vm_object_t             src_object,
-       struct default_freezer_handle *df_handle);
 
 __private_extern__ void
-vm_object_pack_pages(
-       unsigned int            *wired_count,
-       unsigned int            *clean_count,
-       unsigned int            *dirty_count,
-       unsigned int            dirty_budget,
-       vm_object_t             src_object,
-       struct default_freezer_handle *df_handle);
+vm_object_compressed_freezer_pageout(
+       vm_object_t     object);
+
+__private_extern__ void
+vm_object_compressed_freezer_done(
+       void);
+
+#endif /* CONFIG_FREEZE */
 
 __private_extern__ void
 vm_object_pageout(
        vm_object_t     object);
 
-__private_extern__  kern_return_t
-vm_object_pagein(
-       vm_object_t     object);
-#endif /* CONFIG_FREEZE */
+#if CONFIG_IOSCHED
+struct io_reprioritize_req {
+       uint64_t        blkno;
+       uint32_t        len;
+       int             priority;
+       struct vnode    *devvp;
+       queue_chain_t   io_reprioritize_list;
+};
+typedef struct io_reprioritize_req *io_reprioritize_req_t;
+
+extern void vm_io_reprioritize_init(void);
+#endif
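Each request names one disk extent whose queued I/O should have its priority bumped; requests are chained through their io_reprioritize_list field onto a queue drained by the worker that vm_io_reprioritize_init() starts. A sketch of filling one in (the allocation and the global queue name are assumptions):

    io_reprioritize_req_t req;

    req = (io_reprioritize_req_t)kalloc(sizeof(struct io_reprioritize_req));
    req->blkno    = blkno;		/* first block of the extent */
    req->len      = len;		/* extent length in bytes */
    req->priority = priority;		/* target I/O tier */
    req->devvp    = devvp;		/* device vnode the I/O targets */
    queue_enter(&io_reprioritize_list, req,
    		io_reprioritize_req_t, io_reprioritize_list);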
 
 /*
  *     Event waiting handling
@@ -779,37 +891,114 @@ vm_object_pagein(
 #define VM_OBJECT_EVENT_UNBLOCKED              8
 #define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS        9
 
-#define        vm_object_assert_wait(object, event, interruptible)             \
-       (((object)->all_wanted |= 1 << (event)),                        \
-        assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))
-
-#define        vm_object_wait(object, event, interruptible)                    \
-       (vm_object_assert_wait((object),(event),(interruptible)),       \
-       vm_object_unlock(object),                                       \
-       thread_block(THREAD_CONTINUE_NULL))                             \
+#define VM_OBJECT_EVENT_MAX 10 /* 11 bits in "all_wanted", so 0->10 */
 
-#define thread_sleep_vm_object(object, event, interruptible)           \
-        lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))
+static __inline__ wait_result_t
+vm_object_assert_wait(
+       vm_object_t             object,
+       int                     event,
+       wait_interrupt_t        interruptible)
+{
+       wait_result_t wr;
+
+       vm_object_lock_assert_exclusive(object);
+       assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);
+
+       object->all_wanted |= 1 << event;
+       wr = assert_wait((event_t)((vm_offset_t)object + event),
+                        interruptible);
+       return wr;
+}
+
+static __inline__ wait_result_t
+vm_object_wait(
+       vm_object_t             object,
+       int                     event,
+       wait_interrupt_t        interruptible)
+{
+       wait_result_t wr;
+
+       vm_object_assert_wait(object, event, interruptible);
+       vm_object_unlock(object);
+       wr = thread_block(THREAD_CONTINUE_NULL);
+       return wr;
+}
+
+static __inline__ wait_result_t
+thread_sleep_vm_object(
+       vm_object_t             object,
+       event_t                 event,
+       wait_interrupt_t        interruptible)
+{
+       wait_result_t wr;
+
+#if DEVELOPMENT || DEBUG
+       if (object->Lock_owner != current_thread())
+		panic("thread_sleep_vm_object: not owner - %p\n", object);
+       object->Lock_owner = 0;
+#endif
+        wr = lck_rw_sleep(&object->Lock,
+                         LCK_SLEEP_PROMOTED_PRI,
+                         event,
+                         interruptible);
+#if DEVELOPMENT || DEBUG
+       object->Lock_owner = current_thread();
+#endif
+       return wr;
+}
 
-#define vm_object_sleep(object, event, interruptible)                  \
-       (((object)->all_wanted |= 1 << (event)),                        \
-        thread_sleep_vm_object((object),                               \
-               ((vm_offset_t)(object)+(event)), (interruptible)))
+static __inline__ wait_result_t
+vm_object_sleep(
+       vm_object_t             object,
+       int                     event,
+       wait_interrupt_t        interruptible)
+{
+       wait_result_t wr;
+
+       vm_object_lock_assert_exclusive(object);
+       assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);
+
+       object->all_wanted |= 1 << event;
+       wr = thread_sleep_vm_object(object,
+                                   (event_t)((vm_offset_t)object + event),
+                                   interruptible);
+       return wr;
+}
+
+static __inline__ void
+vm_object_wakeup(
+       vm_object_t             object,
+       int                     event)
+{
+       vm_object_lock_assert_exclusive(object);
+       assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);
+
+       if (object->all_wanted & (1 << event))
+               thread_wakeup((event_t)((vm_offset_t)object + event));
+       object->all_wanted &= ~(1 << event);
+}
+
+static __inline__ void
+vm_object_set_wanted(
+       vm_object_t             object,
+       int                     event)
+{
+       vm_object_lock_assert_exclusive(object);
+       assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);
 
-#define        vm_object_wakeup(object, event)                                 \
-       MACRO_BEGIN                                                     \
-       if ((object)->all_wanted & (1 << (event)))                      \
-               thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
-       (object)->all_wanted &= ~(1 << (event));                        \
-       MACRO_END
+       object->all_wanted |= (1 << event);
+}
 
-#define        vm_object_set_wanted(object, event)                             \
-       MACRO_BEGIN                                                     \
-       ((object)->all_wanted |= (1 << (event)));                       \
-       MACRO_END
+static __inline__ int
+vm_object_wanted(
+       vm_object_t             object,
+       int                     event)
+{
+       vm_object_lock_assert_held(object);
+       assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);
 
-#define        vm_object_wanted(object, event)                                 \
-       ((object)->all_wanted & (1 << (event)))
+       return object->all_wanted & (1 << event);
+}
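Taken together these inlines implement a condition-wait protocol on the object lock: a waiter sets its bit in all_wanted, sleeps, and re-checks its condition in a loop, while the last finisher calls vm_object_wakeup(). A sketch of the waiter side, modeled on the vm_object_paging_wait() macro elsewhere in this header:

    vm_object_lock(object);
    while (object->paging_in_progress != 0 ||
           object->activity_in_progress != 0) {
    	/* sleeps, then re-acquires the object lock on wakeup */
    	(void) vm_object_sleep(object,
    			       VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
    			       THREAD_UNINT);
    }
    /* both counters are zero and the lock is still held here */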
 
 /*
  *     Routines implemented as macros
@@ -832,15 +1021,19 @@ vm_object_pagein(
 #define                vm_object_activity_begin(object)                        \
        MACRO_BEGIN                                                     \
        vm_object_lock_assert_exclusive((object));                      \
-       assert((object)->paging_in_progress >= 0);                      \
        VM_PIP_DEBUG_BEGIN((object));                                   \
        (object)->activity_in_progress++;                               \
+       if ((object)->activity_in_progress == 0) {                      \
+               panic("vm_object_activity_begin(%p): overflow\n", (object));\
+       }                                                               \
        MACRO_END
 
 #define                vm_object_activity_end(object)                          \
        MACRO_BEGIN                                                     \
        vm_object_lock_assert_exclusive((object));                      \
-       assert((object)->activity_in_progress > 0);                     \
+       if ((object)->activity_in_progress == 0) {                      \
+               panic("vm_object_activity_end(%p): underflow\n", (object));\
+       }                                                               \
        (object)->activity_in_progress--;                               \
        if ((object)->paging_in_progress == 0 &&                        \
            (object)->activity_in_progress == 0)                        \
@@ -851,15 +1044,19 @@ vm_object_pagein(
 #define                vm_object_paging_begin(object)                          \
        MACRO_BEGIN                                                     \
        vm_object_lock_assert_exclusive((object));                      \
-       assert((object)->paging_in_progress >= 0);                      \
        VM_PIP_DEBUG_BEGIN((object));                                   \
        (object)->paging_in_progress++;                                 \
+       if ((object)->paging_in_progress == 0) {                        \
+               panic("vm_object_paging_begin(%p): overflow\n", (object));\
+       }                                                               \
        MACRO_END
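vm_object_paging_begin() and the vm_object_paging_end() below are meant to bracket each paging operation, with 16-bit wraparound now caught by an explicit panic rather than a compiled-out assert; typical pairing in a pager path (sketch):

    vm_object_lock(object);
    vm_object_paging_begin(object);	/* pin the pager association */
    vm_object_unlock(object);

    /* ... issue and wait for the pagein/pageout I/O ... */

    vm_object_lock(object);
    vm_object_paging_end(object);	/* may wake PAGING_IN_PROGRESS waiters */
    vm_object_unlock(object);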
 
 #define                vm_object_paging_end(object)                            \
        MACRO_BEGIN                                                     \
        vm_object_lock_assert_exclusive((object));                      \
-       assert((object)->paging_in_progress > 0);                       \
+       if ((object)->paging_in_progress == 0) {                        \
+               panic("vm_object_paging_end(%p): underflow\n", (object));\
+       }                                                               \
        (object)->paging_in_progress--;                                 \
        if ((object)->paging_in_progress == 0) {                        \
                vm_object_wakeup((object),                              \
@@ -935,60 +1132,6 @@ vm_object_pagein(
 
 
 
-#define OBJECT_LOCK_SHARED     0
-#define OBJECT_LOCK_EXCLUSIVE  1
-
-extern lck_grp_t       vm_object_lck_grp;
-extern lck_grp_attr_t  vm_object_lck_grp_attr;
-extern lck_attr_t      vm_object_lck_attr;
-extern lck_attr_t      kernel_object_lck_attr;
-extern lck_attr_t      compressor_object_lck_attr;
-
-extern vm_object_t     vm_pageout_scan_wants_object;
-
-extern void            vm_object_lock(vm_object_t);
-extern boolean_t       vm_object_lock_try(vm_object_t);
-extern boolean_t       _vm_object_lock_try(vm_object_t);
-extern boolean_t       vm_object_lock_avoid(vm_object_t);
-extern void            vm_object_lock_shared(vm_object_t);
-extern boolean_t       vm_object_lock_try_shared(vm_object_t);
-
-/*
- *     Object locking macros
- */
-
-#define vm_object_lock_init(object)                                    \
-       lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
-                   (((object) == kernel_object ||                      \
-                     (object) == vm_submap_object) ?                   \
-                    &kernel_object_lck_attr :                          \
-                    (((object) == compressor_object) ?                 \
-                    &compressor_object_lck_attr :                      \
-                     &vm_object_lck_attr)))
-#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
-
-#define vm_object_unlock(object)       lck_rw_done(&(object)->Lock)
-#define vm_object_lock_upgrade(object) lck_rw_lock_shared_to_exclusive(&(object)->Lock)
-#define vm_object_lock_try_scan(object)        _vm_object_lock_try(object)
-
-/*
- * CAUTION: the following vm_object_lock_assert_held*() macros merely
- * check if anyone is holding the lock, but the holder may not necessarily
- * be the caller...
- */
-#if MACH_ASSERT || DEBUG
-#define vm_object_lock_assert_held(object) \
-       lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
-#define vm_object_lock_assert_shared(object)   \
-       lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
-#define vm_object_lock_assert_exclusive(object) \
-       lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
-#else  /* MACH_ASSERT || DEBUG */ 
-#define vm_object_lock_assert_held(object)
-#define vm_object_lock_assert_shared(object)
-#define vm_object_lock_assert_exclusive(object)
-#endif /* MACH_ASSERT || DEBUG */
-
 #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
 #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
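For example, with 4 KB pages (PAGE_MASK == 0xFFF):

    vm_object_round_page(0x1234) == 0x2000	/* up to the next page boundary */
    vm_object_trunc_page(0x1234) == 0x1000	/* down to the page base */
    vm_object_round_page(0x2000) == 0x2000	/* already aligned: unchanged */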